// File: repos/DirectXShaderCompiler/include/llvm/Object/ObjectFile.h
//===- ObjectFile.h - File format independent object file -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares a file format independent ObjectFile class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_OBJECTFILE_H
#define LLVM_OBJECT_OBJECTFILE_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstring>
#include <vector>
namespace llvm {
namespace object {
class ObjectFile;
class COFFObjectFile;
class MachOObjectFile;
class SymbolRef;
class symbol_iterator;
class SectionRef;
typedef content_iterator<SectionRef> section_iterator;
/// This is a value type class that represents a single relocation in the list
/// of relocations in the object file.
class RelocationRef {
DataRefImpl RelocationPimpl;
const ObjectFile *OwningObject;
public:
RelocationRef() : OwningObject(nullptr) { }
RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
bool operator==(const RelocationRef &Other) const;
void moveNext();
uint64_t getOffset() const;
symbol_iterator getSymbol() const;
uint64_t getType() const;
/// @brief Get a string that represents the type of this relocation.
///
/// This is for display purposes only.
void getTypeName(SmallVectorImpl<char> &Result) const;
DataRefImpl getRawDataRefImpl() const;
const ObjectFile *getObject() const;
};
typedef content_iterator<RelocationRef> relocation_iterator;
/// This is a value type class that represents a single section in the list of
/// sections in the object file.
class SectionRef {
friend class SymbolRef;
DataRefImpl SectionPimpl;
const ObjectFile *OwningObject;
public:
SectionRef() : OwningObject(nullptr) { }
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
bool operator==(const SectionRef &Other) const;
bool operator!=(const SectionRef &Other) const;
bool operator<(const SectionRef &Other) const;
void moveNext();
std::error_code getName(StringRef &Result) const;
uint64_t getAddress() const;
uint64_t getSize() const;
std::error_code getContents(StringRef &Result) const;
/// @brief Get the alignment of this section as the actual value (not log 2).
uint64_t getAlignment() const;
bool isText() const;
bool isData() const;
bool isBSS() const;
bool isVirtual() const;
bool containsSymbol(SymbolRef S) const;
relocation_iterator relocation_begin() const;
relocation_iterator relocation_end() const;
iterator_range<relocation_iterator> relocations() const {
return iterator_range<relocation_iterator>(relocation_begin(),
relocation_end());
}
section_iterator getRelocatedSection() const;
DataRefImpl getRawDataRefImpl() const;
const ObjectFile *getObject() const;
};
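// Example (illustrative sketch, not part of the original header): walking the
// relocations of a SectionRef "Sec" through the range accessor above.
// "Sec" is a hypothetical section obtained from an ObjectFile.
//
//   for (const RelocationRef &Reloc : Sec.relocations()) {
//     SmallString<32> TypeName;
//     Reloc.getTypeName(TypeName);
//     uint64_t Offset = Reloc.getOffset();
//     (void)Offset;
//   }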
/// This is a value type class that represents a single symbol in the list of
/// symbols in the object file.
class SymbolRef : public BasicSymbolRef {
friend class SectionRef;
public:
SymbolRef() : BasicSymbolRef() {}
enum Type {
ST_Unknown, // Type not specified
ST_Data,
ST_Debug,
ST_File,
ST_Function,
ST_Other
};
SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) {
assert(isa<ObjectFile>(BasicSymbolRef::getObject()));
}
ErrorOr<StringRef> getName() const;
/// Returns the symbol virtual address (i.e. address at which it will be
/// mapped).
ErrorOr<uint64_t> getAddress() const;
/// Return the value of the symbol; depending on the object format, this can
/// be an offset or a virtual address.
uint64_t getValue() const;
/// @brief Get the alignment of this symbol as the actual value (not log 2).
uint32_t getAlignment() const;
uint64_t getCommonSize() const;
SymbolRef::Type getType() const;
/// @brief Get the section this symbol is defined in. The result is
/// section_end() if the symbol is undefined or absolute.
std::error_code getSection(section_iterator &Result) const;
const ObjectFile *getObject() const;
};
class symbol_iterator : public basic_symbol_iterator {
public:
symbol_iterator(SymbolRef Sym) : basic_symbol_iterator(Sym) {}
symbol_iterator(const basic_symbol_iterator &B)
: basic_symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
cast<ObjectFile>(B->getObject()))) {}
const SymbolRef *operator->() const {
const BasicSymbolRef &P = basic_symbol_iterator::operator *();
return static_cast<const SymbolRef*>(&P);
}
const SymbolRef &operator*() const {
const BasicSymbolRef &P = basic_symbol_iterator::operator *();
return static_cast<const SymbolRef&>(P);
}
};
/// This class is the base class for all object file types. Concrete instances
/// of this object are created by createObjectFile, which figures out which type
/// to create.
class ObjectFile : public SymbolicFile {
virtual void anchor();
ObjectFile() = delete;
ObjectFile(const ObjectFile &other) = delete;
protected:
ObjectFile(unsigned int Type, MemoryBufferRef Source);
const uint8_t *base() const {
return reinterpret_cast<const uint8_t *>(Data.getBufferStart());
}
// These functions are for SymbolRef to call internally. The main goal of
// this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
// entry in the memory mapped object file. SymbolPimpl cannot contain any
// virtual functions because then it could not point into the memory mapped
// file.
//
// Implementations assume that the DataRefImpl is valid and has not been
// modified externally. It's UB otherwise.
friend class SymbolRef;
virtual ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const = 0;
std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const override;
virtual ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const = 0;
virtual uint64_t getSymbolValueImpl(DataRefImpl Symb) const = 0;
virtual uint32_t getSymbolAlignment(DataRefImpl Symb) const;
virtual uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const = 0;
virtual SymbolRef::Type getSymbolType(DataRefImpl Symb) const = 0;
virtual std::error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const = 0;
// Same as above for SectionRef.
friend class SectionRef;
virtual void moveSectionNext(DataRefImpl &Sec) const = 0;
virtual std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const = 0;
virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
virtual std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const = 0;
virtual uint64_t getSectionAlignment(DataRefImpl Sec) const = 0;
virtual bool isSectionText(DataRefImpl Sec) const = 0;
virtual bool isSectionData(DataRefImpl Sec) const = 0;
virtual bool isSectionBSS(DataRefImpl Sec) const = 0;
// A section is 'virtual' if its contents aren't present in the object image.
virtual bool isSectionVirtual(DataRefImpl Sec) const = 0;
virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
virtual section_iterator getRelocatedSection(DataRefImpl Sec) const;
// Same as above for RelocationRef.
friend class RelocationRef;
virtual void moveRelocationNext(DataRefImpl &Rel) const = 0;
virtual uint64_t getRelocationOffset(DataRefImpl Rel) const = 0;
virtual symbol_iterator getRelocationSymbol(DataRefImpl Rel) const = 0;
virtual uint64_t getRelocationType(DataRefImpl Rel) const = 0;
virtual void getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const = 0;
uint64_t getSymbolValue(DataRefImpl Symb) const;
public:
uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
assert(getSymbolFlags(Symb) & SymbolRef::SF_Common);
return getCommonSymbolSizeImpl(Symb);
}
typedef iterator_range<symbol_iterator> symbol_iterator_range;
symbol_iterator_range symbols() const {
return symbol_iterator_range(symbol_begin(), symbol_end());
}
virtual section_iterator section_begin() const = 0;
virtual section_iterator section_end() const = 0;
typedef iterator_range<section_iterator> section_iterator_range;
section_iterator_range sections() const {
return section_iterator_range(section_begin(), section_end());
}
/// @brief The number of bytes used to represent an address in this object
/// file format.
virtual uint8_t getBytesInAddress() const = 0;
virtual StringRef getFileFormatName() const = 0;
virtual /* Triple::ArchType */ unsigned getArch() const = 0;
/// Returns platform-specific object flags, if any.
virtual std::error_code getPlatformFlags(unsigned &Result) const {
Result = 0;
return object_error::invalid_file_type;
}
/// True if this is a relocatable object (.o/.obj).
virtual bool isRelocatableObject() const = 0;
/// @brief Create ObjectFile from path.
/// @param ObjectPath The path to the object file. ObjectPath.isObject must
/// return true.
/// @returns Pointer to an ObjectFile subclass that handles this type of
/// object.
static ErrorOr<OwningBinary<ObjectFile>>
createObjectFile(StringRef ObjectPath);
static ErrorOr<std::unique_ptr<ObjectFile>>
createObjectFile(MemoryBufferRef Object, sys::fs::file_magic Type);
static ErrorOr<std::unique_ptr<ObjectFile>>
createObjectFile(MemoryBufferRef Object) {
return createObjectFile(Object, sys::fs::file_magic::unknown);
}
static inline bool classof(const Binary *v) {
return v->isObject();
}
static ErrorOr<std::unique_ptr<COFFObjectFile>>
createCOFFObjectFile(MemoryBufferRef Object);
static ErrorOr<std::unique_ptr<ObjectFile>>
createELFObjectFile(MemoryBufferRef Object);
static ErrorOr<std::unique_ptr<MachOObjectFile>>
createMachOObjectFile(MemoryBufferRef Object);
};
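// Example (illustrative sketch, not part of the original header): opening an
// object file from disk and dumping section names. "a.out" is a hypothetical
// path; error handling is abbreviated.
//
//   if (ErrorOr<OwningBinary<ObjectFile>> BinOrErr =
//           ObjectFile::createObjectFile("a.out")) {
//     ObjectFile &Obj = *BinOrErr->getBinary();
//     for (const SectionRef &Sec : Obj.sections()) {
//       StringRef Name;
//       if (!Sec.getName(Name))
//         outs() << Name << " (" << Sec.getSize() << " bytes)\n";
//     }
//   }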
// Inline function definitions.
inline SymbolRef::SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner)
: BasicSymbolRef(SymbolP, Owner) {}
inline ErrorOr<StringRef> SymbolRef::getName() const {
return getObject()->getSymbolName(getRawDataRefImpl());
}
inline ErrorOr<uint64_t> SymbolRef::getAddress() const {
return getObject()->getSymbolAddress(getRawDataRefImpl());
}
inline uint64_t SymbolRef::getValue() const {
return getObject()->getSymbolValue(getRawDataRefImpl());
}
inline uint32_t SymbolRef::getAlignment() const {
return getObject()->getSymbolAlignment(getRawDataRefImpl());
}
inline uint64_t SymbolRef::getCommonSize() const {
return getObject()->getCommonSymbolSize(getRawDataRefImpl());
}
inline std::error_code SymbolRef::getSection(section_iterator &Result) const {
return getObject()->getSymbolSection(getRawDataRefImpl(), Result);
}
inline SymbolRef::Type SymbolRef::getType() const {
return getObject()->getSymbolType(getRawDataRefImpl());
}
inline const ObjectFile *SymbolRef::getObject() const {
const SymbolicFile *O = BasicSymbolRef::getObject();
return cast<ObjectFile>(O);
}
/// SectionRef
inline SectionRef::SectionRef(DataRefImpl SectionP,
const ObjectFile *Owner)
: SectionPimpl(SectionP)
, OwningObject(Owner) {}
inline bool SectionRef::operator==(const SectionRef &Other) const {
return SectionPimpl == Other.SectionPimpl;
}
inline bool SectionRef::operator!=(const SectionRef &Other) const {
return SectionPimpl != Other.SectionPimpl;
}
inline bool SectionRef::operator<(const SectionRef &Other) const {
return SectionPimpl < Other.SectionPimpl;
}
inline void SectionRef::moveNext() {
return OwningObject->moveSectionNext(SectionPimpl);
}
inline std::error_code SectionRef::getName(StringRef &Result) const {
return OwningObject->getSectionName(SectionPimpl, Result);
}
inline uint64_t SectionRef::getAddress() const {
return OwningObject->getSectionAddress(SectionPimpl);
}
inline uint64_t SectionRef::getSize() const {
return OwningObject->getSectionSize(SectionPimpl);
}
inline std::error_code SectionRef::getContents(StringRef &Result) const {
return OwningObject->getSectionContents(SectionPimpl, Result);
}
inline uint64_t SectionRef::getAlignment() const {
return OwningObject->getSectionAlignment(SectionPimpl);
}
inline bool SectionRef::isText() const {
return OwningObject->isSectionText(SectionPimpl);
}
inline bool SectionRef::isData() const {
return OwningObject->isSectionData(SectionPimpl);
}
inline bool SectionRef::isBSS() const {
return OwningObject->isSectionBSS(SectionPimpl);
}
inline bool SectionRef::isVirtual() const {
return OwningObject->isSectionVirtual(SectionPimpl);
}
inline relocation_iterator SectionRef::relocation_begin() const {
return OwningObject->section_rel_begin(SectionPimpl);
}
inline relocation_iterator SectionRef::relocation_end() const {
return OwningObject->section_rel_end(SectionPimpl);
}
inline section_iterator SectionRef::getRelocatedSection() const {
return OwningObject->getRelocatedSection(SectionPimpl);
}
inline DataRefImpl SectionRef::getRawDataRefImpl() const {
return SectionPimpl;
}
inline const ObjectFile *SectionRef::getObject() const {
return OwningObject;
}
/// RelocationRef
inline RelocationRef::RelocationRef(DataRefImpl RelocationP,
const ObjectFile *Owner)
: RelocationPimpl(RelocationP)
, OwningObject(Owner) {}
inline bool RelocationRef::operator==(const RelocationRef &Other) const {
return RelocationPimpl == Other.RelocationPimpl;
}
inline void RelocationRef::moveNext() {
return OwningObject->moveRelocationNext(RelocationPimpl);
}
inline uint64_t RelocationRef::getOffset() const {
return OwningObject->getRelocationOffset(RelocationPimpl);
}
inline symbol_iterator RelocationRef::getSymbol() const {
return OwningObject->getRelocationSymbol(RelocationPimpl);
}
inline uint64_t RelocationRef::getType() const {
return OwningObject->getRelocationType(RelocationPimpl);
}
inline void RelocationRef::getTypeName(SmallVectorImpl<char> &Result) const {
return OwningObject->getRelocationTypeName(RelocationPimpl, Result);
}
inline DataRefImpl RelocationRef::getRawDataRefImpl() const {
return RelocationPimpl;
}
inline const ObjectFile *RelocationRef::getObject() const {
return OwningObject;
}
} // end namespace object
} // end namespace llvm
#endif
// File: repos/DirectXShaderCompiler/include/llvm/Object/RelocVisitor.h
//===-- RelocVisitor.h - Visitor for object file relocations -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides a wrapper around all the different types of relocations
// in different file formats, such that a client can handle them in a unified
// manner by only implementing a minimal number of functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_RELOCVISITOR_H
#define LLVM_OBJECT_RELOCVISITOR_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/MachO.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
namespace object {
struct RelocToApply {
// The computed value after applying the relevant relocations.
int64_t Value;
// The width of the value; how many bytes to touch when applying the
// relocation.
char Width;
RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {}
RelocToApply() : Value(0), Width(0) {}
};
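// Example (illustrative sketch): a client applies a relocation by asking the
// visitor for the value and width to patch. "Obj", "Reloc", and "SymVal" are
// hypothetical: an ObjectFile, one of its relocations, and a symbol value the
// caller has already resolved.
//
//   RelocVisitor V(Obj);
//   RelocToApply RA = V.visit(Reloc.getType(), Reloc, SymVal);
//   if (!V.error()) {
//     // Patch RA.Width bytes at Reloc.getOffset() with RA.Value.
//   }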
/// @brief Base class for object file relocation visitors.
class RelocVisitor {
public:
explicit RelocVisitor(const ObjectFile &Obj)
: ObjToVisit(Obj), HasError(false) {}
// TODO: Should handle multiple applied relocations via either passing in the
// previously computed value or just counting paired relocations as a single
// visit.
RelocToApply visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
if (isa<ELFObjectFileBase>(ObjToVisit))
return visitELF(RelocType, R, Value);
if (isa<COFFObjectFile>(ObjToVisit))
return visitCOFF(RelocType, R, Value);
if (isa<MachOObjectFile>(ObjToVisit))
return visitMachO(RelocType, R, Value);
HasError = true;
return RelocToApply();
}
bool error() { return HasError; }
private:
const ObjectFile &ObjToVisit;
bool HasError;
RelocToApply visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
switch (ObjToVisit.getArch()) {
case Triple::x86_64:
switch (RelocType) {
case llvm::ELF::R_X86_64_NONE:
return visitELF_X86_64_NONE(R);
case llvm::ELF::R_X86_64_64:
return visitELF_X86_64_64(R, Value);
case llvm::ELF::R_X86_64_PC32:
return visitELF_X86_64_PC32(R, Value);
case llvm::ELF::R_X86_64_32:
return visitELF_X86_64_32(R, Value);
case llvm::ELF::R_X86_64_32S:
return visitELF_X86_64_32S(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::aarch64:
switch (RelocType) {
case llvm::ELF::R_AARCH64_ABS32:
return visitELF_AARCH64_ABS32(R, Value);
case llvm::ELF::R_AARCH64_ABS64:
return visitELF_AARCH64_ABS64(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::mips64el:
case Triple::mips64:
switch (RelocType) {
case llvm::ELF::R_MIPS_32:
return visitELF_MIPS64_32(R, Value);
case llvm::ELF::R_MIPS_64:
return visitELF_MIPS64_64(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::ppc64le:
case Triple::ppc64:
switch (RelocType) {
case llvm::ELF::R_PPC64_ADDR32:
return visitELF_PPC64_ADDR32(R, Value);
case llvm::ELF::R_PPC64_ADDR64:
return visitELF_PPC64_ADDR64(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::systemz:
switch (RelocType) {
case llvm::ELF::R_390_32:
return visitELF_390_32(R, Value);
case llvm::ELF::R_390_64:
return visitELF_390_64(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::sparcv9:
switch (RelocType) {
case llvm::ELF::R_SPARC_32:
case llvm::ELF::R_SPARC_UA32:
return visitELF_SPARCV9_32(R, Value);
case llvm::ELF::R_SPARC_64:
case llvm::ELF::R_SPARC_UA64:
return visitELF_SPARCV9_64(R, Value);
default:
HasError = true;
return RelocToApply();
}
default:
HasError = true;
return RelocToApply();
}
} else if (ObjToVisit.getBytesInAddress() == 4) { // 32-bit object file
switch (ObjToVisit.getArch()) {
case Triple::x86:
switch (RelocType) {
case llvm::ELF::R_386_NONE:
return visitELF_386_NONE(R);
case llvm::ELF::R_386_32:
return visitELF_386_32(R, Value);
case llvm::ELF::R_386_PC32:
return visitELF_386_PC32(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::ppc:
switch (RelocType) {
case llvm::ELF::R_PPC_ADDR32:
return visitELF_PPC_ADDR32(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::arm:
case Triple::armeb:
switch (RelocType) {
default:
HasError = true;
return RelocToApply();
case llvm::ELF::R_ARM_ABS32:
return visitELF_ARM_ABS32(R, Value);
}
case Triple::mipsel:
case Triple::mips:
switch (RelocType) {
case llvm::ELF::R_MIPS_32:
return visitELF_MIPS_32(R, Value);
default:
HasError = true;
return RelocToApply();
}
case Triple::sparc:
switch (RelocType) {
case llvm::ELF::R_SPARC_32:
case llvm::ELF::R_SPARC_UA32:
return visitELF_SPARC_32(R, Value);
default:
HasError = true;
return RelocToApply();
}
default:
HasError = true;
return RelocToApply();
}
} else {
report_fatal_error("Invalid word size in object file");
}
}
RelocToApply visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
switch (ObjToVisit.getArch()) {
case Triple::x86:
switch (RelocType) {
case COFF::IMAGE_REL_I386_SECREL:
return visitCOFF_I386_SECREL(R, Value);
case COFF::IMAGE_REL_I386_DIR32:
return visitCOFF_I386_DIR32(R, Value);
}
break;
case Triple::x86_64:
switch (RelocType) {
case COFF::IMAGE_REL_AMD64_SECREL:
return visitCOFF_AMD64_SECREL(R, Value);
case COFF::IMAGE_REL_AMD64_ADDR64:
return visitCOFF_AMD64_ADDR64(R, Value);
}
break;
}
HasError = true;
return RelocToApply();
}
RelocToApply visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
switch (ObjToVisit.getArch()) {
default: break;
case Triple::x86_64:
switch (RelocType) {
default: break;
case MachO::X86_64_RELOC_UNSIGNED:
return visitMACHO_X86_64_UNSIGNED(R, Value);
}
}
HasError = true;
return RelocToApply();
}
int64_t getELFAddend(RelocationRef R) {
ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(R).getAddend();
if (std::error_code EC = AddendOrErr.getError())
report_fatal_error(EC.message());
return *AddendOrErr;
}
uint8_t getLengthMachO64(RelocationRef R) {
const MachOObjectFile *Obj = cast<MachOObjectFile>(R.getObject());
return Obj->getRelocationLength(R.getRawDataRefImpl());
}
/// Operations
/// 386-ELF
RelocToApply visitELF_386_NONE(RelocationRef R) {
return RelocToApply(0, 0);
}
// Ideally the Addend here would be the addend stored in the data for the
// relocation; that is not actually the case for Rel (as opposed to Rela)
// relocations.
RelocToApply visitELF_386_32(RelocationRef R, uint64_t Value) {
return RelocToApply(Value, 4);
}
RelocToApply visitELF_386_PC32(RelocationRef R, uint64_t Value) {
uint64_t Address = R.getOffset();
return RelocToApply(Value - Address, 4);
}
/// X86-64 ELF
RelocToApply visitELF_X86_64_NONE(RelocationRef R) {
return RelocToApply(0, 0);
}
RelocToApply visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
}
RelocToApply visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint64_t Address = R.getOffset();
return RelocToApply(Value + Addend - Address, 4);
}
RelocToApply visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
RelocToApply visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
/// PPC64 ELF
RelocToApply visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
RelocToApply visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
}
/// PPC32 ELF
RelocToApply visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
/// MIPS ELF
RelocToApply visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
uint32_t Res = Value & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
/// MIPS64 ELF
RelocToApply visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
}
RelocToApply visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint64_t Res = (Value + Addend);
return RelocToApply(Res, 8);
}
// AArch64 ELF
RelocToApply visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int64_t Res = Value + Addend;
// Overflow check allows for both signed and unsigned interpretation.
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
}
RelocToApply visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
}
// SystemZ ELF
RelocToApply visitELF_390_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int64_t Res = Value + Addend;
// Overflow check allows for both signed and unsigned interpretation.
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
}
RelocToApply visitELF_390_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
}
RelocToApply visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
int32_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
}
RelocToApply visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
int32_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
}
RelocToApply visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
}
RelocToApply visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
int64_t Res = Value;
// Overflow check allows for both signed and unsigned interpretation.
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
}
/// I386 COFF
RelocToApply visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
}
RelocToApply visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
}
/// AMD64 COFF
RelocToApply visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
}
RelocToApply visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
return RelocToApply(Value, /*Width=*/8);
}
// X86_64 MachO
RelocToApply visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
// Mach-O stores the relocation length as log2 of the width in bytes.
uint8_t Length = getLengthMachO64(R);
Length = 1 << Length;
return RelocToApply(Value, Length);
}
};
}
}
#endif
// File: repos/DirectXShaderCompiler/include/llvm/Object/MachOUniversal.h
//===- MachOUniversal.h - Mach-O universal binaries -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares Mach-O fat/universal binaries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_MACHOUNIVERSAL_H
#define LLVM_OBJECT_MACHOUNIVERSAL_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/MachO.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MachO.h"
namespace llvm {
namespace object {
class MachOUniversalBinary : public Binary {
virtual void anchor();
uint32_t NumberOfObjects;
public:
class ObjectForArch {
const MachOUniversalBinary *Parent;
/// \brief Index of object in the universal binary.
uint32_t Index;
/// \brief Descriptor of the object.
MachO::fat_arch Header;
public:
ObjectForArch(const MachOUniversalBinary *Parent, uint32_t Index);
void clear() {
Parent = nullptr;
Index = 0;
}
bool operator==(const ObjectForArch &Other) const {
return (Parent == Other.Parent) && (Index == Other.Index);
}
ObjectForArch getNext() const { return ObjectForArch(Parent, Index + 1); }
uint32_t getCPUType() const { return Header.cputype; }
uint32_t getCPUSubType() const { return Header.cpusubtype; }
uint32_t getOffset() const { return Header.offset; }
uint32_t getSize() const { return Header.size; }
uint32_t getAlign() const { return Header.align; }
std::string getArchTypeName() const {
Triple T = MachOObjectFile::getArch(Header.cputype, Header.cpusubtype);
return T.getArchName();
}
ErrorOr<std::unique_ptr<MachOObjectFile>> getAsObjectFile() const;
ErrorOr<std::unique_ptr<Archive>> getAsArchive() const;
};
class object_iterator {
ObjectForArch Obj;
public:
object_iterator(const ObjectForArch &Obj) : Obj(Obj) {}
const ObjectForArch *operator->() const { return &Obj; }
const ObjectForArch &operator*() const { return Obj; }
bool operator==(const object_iterator &Other) const {
return Obj == Other.Obj;
}
bool operator!=(const object_iterator &Other) const {
return !(*this == Other);
}
object_iterator& operator++() { // Preincrement
Obj = Obj.getNext();
return *this;
}
};
MachOUniversalBinary(MemoryBufferRef Source, std::error_code &EC);
static ErrorOr<std::unique_ptr<MachOUniversalBinary>>
create(MemoryBufferRef Source);
object_iterator begin_objects() const {
return ObjectForArch(this, 0);
}
object_iterator end_objects() const {
return ObjectForArch(nullptr, 0);
}
iterator_range<object_iterator> objects() const {
return make_range(begin_objects(), end_objects());
}
uint32_t getNumberOfObjects() const { return NumberOfObjects; }
// Cast methods.
static inline bool classof(Binary const *V) {
return V->isMachOUniversalBinary();
}
ErrorOr<std::unique_ptr<MachOObjectFile>>
getObjectForArch(StringRef ArchName) const;
};
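// Example (illustrative, not part of the original header): enumerating the
// slices of a fat binary held in a MemoryBufferRef "Buf" (hypothetical) and
// materializing each slice as a MachOObjectFile.
//
//   if (auto BinOrErr = MachOUniversalBinary::create(Buf)) {
//     for (const MachOUniversalBinary::ObjectForArch &O :
//          (*BinOrErr)->objects()) {
//       if (auto ObjOrErr = O.getAsObjectFile())
//         outs() << O.getArchTypeName() << "\n";
//     }
//   }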
}
}
#endif
// File: repos/DirectXShaderCompiler/include/llvm/Object/ELFObjectFile.h
//===- ELFObjectFile.h - ELF object file implementation ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the ELFObjectFile template class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ELFOBJECTFILE_H
#define LLVM_OBJECT_ELFOBJECTFILE_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cctype>
#include <limits>
#include <utility>
namespace llvm {
namespace object {
class elf_symbol_iterator;
class ELFSymbolRef;
class ELFRelocationRef;
class ELFObjectFileBase : public ObjectFile {
friend class ELFSymbolRef;
friend class ELFSectionRef;
friend class ELFRelocationRef;
protected:
ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source);
virtual uint64_t getSymbolSize(DataRefImpl Symb) const = 0;
virtual uint8_t getSymbolOther(DataRefImpl Symb) const = 0;
virtual uint8_t getSymbolELFType(DataRefImpl Symb) const = 0;
virtual uint32_t getSectionType(DataRefImpl Sec) const = 0;
virtual uint64_t getSectionFlags(DataRefImpl Sec) const = 0;
virtual ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;
public:
typedef iterator_range<elf_symbol_iterator> elf_symbol_iterator_range;
virtual elf_symbol_iterator_range getDynamicSymbolIterators() const = 0;
elf_symbol_iterator_range symbols() const;
static inline bool classof(const Binary *v) { return v->isELF(); }
};
class ELFSectionRef : public SectionRef {
public:
ELFSectionRef(const SectionRef &B) : SectionRef(B) {
assert(isa<ELFObjectFileBase>(SectionRef::getObject()));
}
const ELFObjectFileBase *getObject() const {
return cast<ELFObjectFileBase>(SectionRef::getObject());
}
uint32_t getType() const {
return getObject()->getSectionType(getRawDataRefImpl());
}
uint64_t getFlags() const {
return getObject()->getSectionFlags(getRawDataRefImpl());
}
};
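// Example (illustrative): a generic SectionRef "Sec" (hypothetical) from an
// ELF file can be wrapped to reach ELF-specific fields.
//
//   ELFSectionRef ES(Sec);
//   bool IsAlloc = ES.getFlags() & ELF::SHF_ALLOC;
//   bool IsNote = ES.getType() == ELF::SHT_NOTE;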
class elf_section_iterator : public section_iterator {
public:
elf_section_iterator(const section_iterator &B) : section_iterator(B) {
assert(isa<ELFObjectFileBase>(B->getObject()));
}
const ELFSectionRef *operator->() const {
return static_cast<const ELFSectionRef *>(section_iterator::operator->());
}
const ELFSectionRef &operator*() const {
return static_cast<const ELFSectionRef &>(section_iterator::operator*());
}
};
class ELFSymbolRef : public SymbolRef {
public:
ELFSymbolRef(const SymbolRef &B) : SymbolRef(B) {
assert(isa<ELFObjectFileBase>(SymbolRef::getObject()));
}
const ELFObjectFileBase *getObject() const {
return cast<ELFObjectFileBase>(BasicSymbolRef::getObject());
}
uint64_t getSize() const {
return getObject()->getSymbolSize(getRawDataRefImpl());
}
uint8_t getOther() const {
return getObject()->getSymbolOther(getRawDataRefImpl());
}
uint8_t getELFType() const {
return getObject()->getSymbolELFType(getRawDataRefImpl());
}
};
class elf_symbol_iterator : public symbol_iterator {
public:
elf_symbol_iterator(const basic_symbol_iterator &B)
: symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
cast<ELFObjectFileBase>(B->getObject()))) {}
const ELFSymbolRef *operator->() const {
return static_cast<const ELFSymbolRef *>(symbol_iterator::operator->());
}
const ELFSymbolRef &operator*() const {
return static_cast<const ELFSymbolRef &>(symbol_iterator::operator*());
}
};
class ELFRelocationRef : public RelocationRef {
public:
ELFRelocationRef(const RelocationRef &B) : RelocationRef(B) {
assert(isa<ELFObjectFileBase>(RelocationRef::getObject()));
}
const ELFObjectFileBase *getObject() const {
return cast<ELFObjectFileBase>(RelocationRef::getObject());
}
ErrorOr<int64_t> getAddend() const {
return getObject()->getRelocationAddend(getRawDataRefImpl());
}
};
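// Example (illustrative): reading the addend of a relocation "Reloc"
// (hypothetical) through the typed wrapper; getAddend() returns an error for
// SHT_REL sections, which store no explicit addend.
//
//   ELFRelocationRef ER(Reloc);
//   if (ErrorOr<int64_t> AddendOrErr = ER.getAddend()) {
//     int64_t Addend = *AddendOrErr;
//     (void)Addend;
//   }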
class elf_relocation_iterator : public relocation_iterator {
public:
elf_relocation_iterator(const relocation_iterator &B)
: relocation_iterator(RelocationRef(
B->getRawDataRefImpl(), cast<ELFObjectFileBase>(B->getObject()))) {}
const ELFRelocationRef *operator->() const {
return static_cast<const ELFRelocationRef *>(
relocation_iterator::operator->());
}
const ELFRelocationRef &operator*() const {
return static_cast<const ELFRelocationRef &>(
relocation_iterator::operator*());
}
};
inline ELFObjectFileBase::elf_symbol_iterator_range
ELFObjectFileBase::symbols() const {
return elf_symbol_iterator_range(symbol_begin(), symbol_end());
}
template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
uint64_t getSymbolSize(DataRefImpl Sym) const override;
public:
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef typename ELFFile<ELFT>::uintX_t uintX_t;
typedef typename ELFFile<ELFT>::Elf_Sym Elf_Sym;
typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
typedef typename ELFFile<ELFT>::Elf_Ehdr Elf_Ehdr;
typedef typename ELFFile<ELFT>::Elf_Rel Elf_Rel;
typedef typename ELFFile<ELFT>::Elf_Rela Elf_Rela;
typedef typename ELFFile<ELFT>::Elf_Dyn Elf_Dyn;
typedef typename ELFFile<ELFT>::Elf_Dyn_Iter Elf_Dyn_Iter;
protected:
ELFFile<ELFT> EF;
void moveSymbolNext(DataRefImpl &Symb) const override;
ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
uint8_t getSymbolOther(DataRefImpl Symb) const override;
uint8_t getSymbolELFType(DataRefImpl Symb) const override;
SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
section_iterator getSymbolSection(const Elf_Sym *Symb) const;
std::error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const override;
void moveSectionNext(DataRefImpl &Sec) const override;
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAlignment(DataRefImpl Sec) const override;
bool isSectionText(DataRefImpl Sec) const override;
bool isSectionData(DataRefImpl Sec) const override;
bool isSectionBSS(DataRefImpl Sec) const override;
bool isSectionVirtual(DataRefImpl Sec) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
section_iterator getRelocatedSection(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
uint64_t getRelocationOffset(DataRefImpl Rel) const override;
symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
uint64_t getRelocationType(DataRefImpl Rel) const override;
void getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const override;
uint32_t getSectionType(DataRefImpl Sec) const override;
uint64_t getSectionFlags(DataRefImpl Sec) const override;
StringRef getRelocationTypeName(uint32_t Type) const;
/// \brief Get the relocation section that contains \a Rel.
const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
return *EF.getSection(Rel.d.a);
}
const Elf_Sym *toELFSymIter(DataRefImpl Sym) const {
return EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
}
DataRefImpl toDRI(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
DataRefImpl DRI;
if (!SymTable) {
DRI.d.a = 0;
DRI.d.b = 0;
return DRI;
}
assert(SymTable->sh_type == ELF::SHT_SYMTAB ||
SymTable->sh_type == ELF::SHT_DYNSYM);
uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
unsigned SymTableIndex =
(reinterpret_cast<uintptr_t>(SymTable) - SHT) / sizeof(Elf_Shdr);
DRI.d.a = SymTableIndex;
DRI.d.b = SymbolNum;
return DRI;
}
const Elf_Shdr *toELFShdrIter(DataRefImpl Sec) const {
return reinterpret_cast<const Elf_Shdr *>(Sec.p);
}
DataRefImpl toDRI(const Elf_Shdr *Sec) const {
DataRefImpl DRI;
DRI.p = reinterpret_cast<uintptr_t>(Sec);
return DRI;
}
DataRefImpl toDRI(Elf_Dyn_Iter Dyn) const {
DataRefImpl DRI;
DRI.p = reinterpret_cast<uintptr_t>(Dyn.get());
return DRI;
}
bool isExportedToOtherDSO(const Elf_Sym *ESym) const {
unsigned char Binding = ESym->getBinding();
unsigned char Visibility = ESym->getVisibility();
// A symbol is exported if its binding is either GLOBAL or WEAK, and its
// visibility is either DEFAULT or PROTECTED. All other symbols are not
// exported.
if ((Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK) &&
(Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_PROTECTED))
return true;
return false;
}
// This flag is used for classof, to distinguish ELFObjectFile from
// its subclass. If more subclasses are created, this flag will
// have to become an enum.
bool isDyldELFObject;
public:
ELFObjectFile(MemoryBufferRef Object, std::error_code &EC);
const Elf_Rel *getRel(DataRefImpl Rel) const;
const Elf_Rela *getRela(DataRefImpl Rela) const;
const Elf_Sym *getSymbol(DataRefImpl Symb) const;
basic_symbol_iterator symbol_begin_impl() const override;
basic_symbol_iterator symbol_end_impl() const override;
elf_symbol_iterator dynamic_symbol_begin() const;
elf_symbol_iterator dynamic_symbol_end() const;
section_iterator section_begin() const override;
section_iterator section_end() const override;
ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const override;
uint8_t getBytesInAddress() const override;
StringRef getFileFormatName() const override;
unsigned getArch() const override;
StringRef getLoadName() const;
std::error_code getPlatformFlags(unsigned &Result) const override {
Result = EF.getHeader()->e_flags;
return std::error_code();
}
const ELFFile<ELFT> *getELFFile() const { return &EF; }
bool isDyldType() const { return isDyldELFObject; }
static inline bool classof(const Binary *v) {
return v->getType() == getELFType(ELFT::TargetEndianness == support::little,
ELFT::Is64Bits);
}
elf_symbol_iterator_range getDynamicSymbolIterators() const override;
bool isRelocatableObject() const override;
};
typedef ELFObjectFile<ELFType<support::little, false>> ELF32LEObjectFile;
typedef ELFObjectFile<ELFType<support::little, true>> ELF64LEObjectFile;
typedef ELFObjectFile<ELFType<support::big, false>> ELF32BEObjectFile;
typedef ELFObjectFile<ELFType<support::big, true>> ELF64BEObjectFile;
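// Example (illustrative): dispatching on the concrete ELF flavor of a generic
// ObjectFile "Obj" (hypothetical) via the typedefs above.
//
//   if (auto *ELF64LE = dyn_cast<ELF64LEObjectFile>(&Obj))
//     outs() << ELF64LE->getFileFormatName() << "\n";
//   else if (auto *ELF32LE = dyn_cast<ELF32LEObjectFile>(&Obj))
//     outs() << ELF32LE->getFileFormatName() << "\n";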
template <class ELFT>
void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {
++Sym.d.b;
}
template <class ELFT>
ErrorOr<StringRef> ELFObjectFile<ELFT>::getSymbolName(DataRefImpl Sym) const {
const Elf_Sym *ESym = toELFSymIter(Sym);
const Elf_Shdr *SymTableSec = *EF.getSection(Sym.d.a);
const Elf_Shdr *StringTableSec = *EF.getSection(SymTableSec->sh_link);
StringRef SymTable = *EF.getStringTable(StringTableSec);
return ESym->getName(SymTable);
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionFlags(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_flags;
}
template <class ELFT>
uint32_t ELFObjectFile<ELFT>::getSectionType(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_type;
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSymbolValueImpl(DataRefImpl Symb) const {
const Elf_Sym *ESym = getSymbol(Symb);
uint64_t Ret = ESym->st_value;
if (ESym->st_shndx == ELF::SHN_ABS)
return Ret;
const Elf_Ehdr *Header = EF.getHeader();
// Clear the ARM/Thumb or microMIPS indicator flag.
if ((Header->e_machine == ELF::EM_ARM || Header->e_machine == ELF::EM_MIPS) &&
ESym->getType() == ELF::STT_FUNC)
Ret &= ~1;
return Ret;
}
template <class ELFT>
ErrorOr<uint64_t>
ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb) const {
uint64_t Result = getSymbolValue(Symb);
const Elf_Sym *ESym = getSymbol(Symb);
switch (ESym->st_shndx) {
case ELF::SHN_COMMON:
case ELF::SHN_UNDEF:
case ELF::SHN_ABS:
return Result;
}
const Elf_Ehdr *Header = EF.getHeader();
if (Header->e_type == ELF::ET_REL) {
ErrorOr<const Elf_Shdr *> SectionOrErr = EF.getSection(ESym);
if (std::error_code EC = SectionOrErr.getError())
return EC;
const Elf_Shdr *Section = *SectionOrErr;
if (Section)
Result += Section->sh_addr;
}
return Result;
}
template <class ELFT>
uint32_t ELFObjectFile<ELFT>::getSymbolAlignment(DataRefImpl Symb) const {
const Elf_Sym *Sym = toELFSymIter(Symb);
if (Sym->st_shndx == ELF::SHN_COMMON)
return Sym->st_value;
return 0;
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSymbolSize(DataRefImpl Sym) const {
return toELFSymIter(Sym)->st_size;
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
return toELFSymIter(Symb)->st_size;
}
template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getSymbolOther(DataRefImpl Symb) const {
return toELFSymIter(Symb)->st_other;
}
template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getSymbolELFType(DataRefImpl Symb) const {
return toELFSymIter(Symb)->getType();
}
template <class ELFT>
SymbolRef::Type ELFObjectFile<ELFT>::getSymbolType(DataRefImpl Symb) const {
const Elf_Sym *ESym = getSymbol(Symb);
switch (ESym->getType()) {
case ELF::STT_NOTYPE:
return SymbolRef::ST_Unknown;
case ELF::STT_SECTION:
return SymbolRef::ST_Debug;
case ELF::STT_FILE:
return SymbolRef::ST_File;
case ELF::STT_FUNC:
return SymbolRef::ST_Function;
case ELF::STT_OBJECT:
case ELF::STT_COMMON:
case ELF::STT_TLS:
return SymbolRef::ST_Data;
default:
return SymbolRef::ST_Other;
}
}
template <class ELFT>
uint32_t ELFObjectFile<ELFT>::getSymbolFlags(DataRefImpl Sym) const {
const Elf_Sym *ESym = toELFSymIter(Sym);
uint32_t Result = SymbolRef::SF_None;
if (ESym->getBinding() != ELF::STB_LOCAL)
Result |= SymbolRef::SF_Global;
if (ESym->getBinding() == ELF::STB_WEAK)
Result |= SymbolRef::SF_Weak;
if (ESym->st_shndx == ELF::SHN_ABS)
Result |= SymbolRef::SF_Absolute;
if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION ||
ESym == EF.symbol_begin() || ESym == EF.dynamic_symbol_begin())
Result |= SymbolRef::SF_FormatSpecific;
if (EF.getHeader()->e_machine == ELF::EM_ARM) {
if (ErrorOr<StringRef> NameOrErr = getSymbolName(Sym)) {
StringRef Name = *NameOrErr;
if (Name.startswith("$d") || Name.startswith("$t") ||
Name.startswith("$a"))
Result |= SymbolRef::SF_FormatSpecific;
}
}
if (ESym->st_shndx == ELF::SHN_UNDEF)
Result |= SymbolRef::SF_Undefined;
if (ESym->getType() == ELF::STT_COMMON || ESym->st_shndx == ELF::SHN_COMMON)
Result |= SymbolRef::SF_Common;
if (isExportedToOtherDSO(ESym))
Result |= SymbolRef::SF_Exported;
if (ESym->getVisibility() == ELF::STV_HIDDEN)
Result |= SymbolRef::SF_Hidden;
return Result;
}
template <class ELFT>
section_iterator
ELFObjectFile<ELFT>::getSymbolSection(const Elf_Sym *ESym) const {
ErrorOr<const Elf_Shdr *> ESecOrErr = EF.getSection(ESym);
if (std::error_code EC = ESecOrErr.getError())
report_fatal_error(EC.message());
const Elf_Shdr *ESec = *ESecOrErr;
if (!ESec)
return section_end();
DataRefImpl Sec;
Sec.p = reinterpret_cast<intptr_t>(ESec);
return section_iterator(SectionRef(Sec, this));
}
template <class ELFT>
std::error_code
ELFObjectFile<ELFT>::getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const {
Res = getSymbolSection(getSymbol(Symb));
return std::error_code();
}
template <class ELFT>
void ELFObjectFile<ELFT>::moveSectionNext(DataRefImpl &Sec) const {
const Elf_Shdr *ESec = toELFShdrIter(Sec);
Sec = toDRI(++ESec);
}
template <class ELFT>
std::error_code ELFObjectFile<ELFT>::getSectionName(DataRefImpl Sec,
StringRef &Result) const {
ErrorOr<StringRef> Name = EF.getSectionName(&*toELFShdrIter(Sec));
if (!Name)
return Name.getError();
Result = *Name;
return std::error_code();
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_addr;
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_size;
}
template <class ELFT>
std::error_code
ELFObjectFile<ELFT>::getSectionContents(DataRefImpl Sec,
StringRef &Result) const {
const Elf_Shdr *EShdr = toELFShdrIter(Sec);
Result = StringRef((const char *)base() + EShdr->sh_offset, EShdr->sh_size);
return std::error_code();
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionAlignment(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_addralign;
}
template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionText(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_flags & ELF::SHF_EXECINSTR;
}
template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionData(DataRefImpl Sec) const {
const Elf_Shdr *EShdr = toELFShdrIter(Sec);
return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
EShdr->sh_type == ELF::SHT_PROGBITS;
}
template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionBSS(DataRefImpl Sec) const {
const Elf_Shdr *EShdr = toELFShdrIter(Sec);
return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
EShdr->sh_type == ELF::SHT_NOBITS;
}
template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionVirtual(DataRefImpl Sec) const {
return toELFShdrIter(Sec)->sh_type == ELF::SHT_NOBITS;
}
template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
DataRefImpl RelData;
uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
RelData.d.a = (Sec.p - SHT) / EF.getHeader()->e_shentsize;
RelData.d.b = 0;
const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
return relocation_iterator(RelocationRef(RelData, this));
const Elf_Shdr *RelSec = getRelSection(RelData);
ErrorOr<const Elf_Shdr *> SymSecOrErr = EF.getSection(RelSec->sh_link);
if (std::error_code EC = SymSecOrErr.getError())
report_fatal_error(EC.message());
const Elf_Shdr *SymSec = *SymSecOrErr;
uint32_t SymSecType = SymSec->sh_type;
if (SymSecType != ELF::SHT_SYMTAB && SymSecType != ELF::SHT_DYNSYM)
report_fatal_error("Invalid symbol table section type!");
if (SymSecType == ELF::SHT_DYNSYM)
RelData.d.b = 1;
return relocation_iterator(RelocationRef(RelData, this));
}
template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
relocation_iterator Begin = section_rel_begin(Sec);
if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
return Begin;
DataRefImpl RelData = Begin->getRawDataRefImpl();
RelData.d.b += (S->sh_size / S->sh_entsize) << 1;
return relocation_iterator(RelocationRef(RelData, this));
}
template <class ELFT>
section_iterator
ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
if (EF.getHeader()->e_type != ELF::ET_REL)
return section_end();
const Elf_Shdr *EShdr = toELFShdrIter(Sec);
uintX_t Type = EShdr->sh_type;
if (Type != ELF::SHT_REL && Type != ELF::SHT_RELA)
return section_end();
ErrorOr<const Elf_Shdr *> R = EF.getSection(EShdr->sh_info);
if (std::error_code EC = R.getError())
report_fatal_error(EC.message());
return section_iterator(SectionRef(toDRI(*R), this));
}
// Relocations
template <class ELFT>
void ELFObjectFile<ELFT>::moveRelocationNext(DataRefImpl &Rel) const {
Rel.d.b += 2;
}
template <class ELFT>
symbol_iterator
ELFObjectFile<ELFT>::getRelocationSymbol(DataRefImpl Rel) const {
uint32_t symbolIdx;
const Elf_Shdr *sec = getRelSection(Rel);
if (sec->sh_type == ELF::SHT_REL)
symbolIdx = getRel(Rel)->getSymbol(EF.isMips64EL());
else
symbolIdx = getRela(Rel)->getSymbol(EF.isMips64EL());
if (!symbolIdx)
return symbol_end();
bool IsDyn = Rel.d.b & 1;
DataRefImpl SymbolData;
if (IsDyn)
SymbolData = toDRI(EF.getDotDynSymSec(), symbolIdx);
else
SymbolData = toDRI(EF.getDotSymtabSec(), symbolIdx);
return symbol_iterator(SymbolRef(SymbolData, this));
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getRelocationOffset(DataRefImpl Rel) const {
assert(EF.getHeader()->e_type == ELF::ET_REL &&
"Only relocatable object files have relocation offsets");
const Elf_Shdr *sec = getRelSection(Rel);
if (sec->sh_type == ELF::SHT_REL)
return getRel(Rel)->r_offset;
return getRela(Rel)->r_offset;
}
template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getRelocationType(DataRefImpl Rel) const {
const Elf_Shdr *sec = getRelSection(Rel);
if (sec->sh_type == ELF::SHT_REL)
return getRel(Rel)->getType(EF.isMips64EL());
else
return getRela(Rel)->getType(EF.isMips64EL());
}
template <class ELFT>
StringRef ELFObjectFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
return getELFRelocationTypeName(EF.getHeader()->e_machine, Type);
}
template <class ELFT>
void ELFObjectFile<ELFT>::getRelocationTypeName(
DataRefImpl Rel, SmallVectorImpl<char> &Result) const {
uint32_t type = getRelocationType(Rel);
EF.getRelocationTypeName(type, Result);
}
template <class ELFT>
ErrorOr<int64_t>
ELFObjectFile<ELFT>::getRelocationAddend(DataRefImpl Rel) const {
if (getRelSection(Rel)->sh_type != ELF::SHT_RELA)
return object_error::parse_failed;
return (int64_t)getRela(Rel)->r_addend;
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Sym *
ELFObjectFile<ELFT>::getSymbol(DataRefImpl Symb) const {
return &*toELFSymIter(Symb);
}
template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rel *
ELFObjectFile<ELFT>::getRel(DataRefImpl Rel) const {
assert(getRelSection(Rel)->sh_type == ELF::SHT_REL);
return EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b >> 1);
}
template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rela *
ELFObjectFile<ELFT>::getRela(DataRefImpl Rela) const {
assert(getRelSection(Rela)->sh_type == ELF::SHT_RELA);
return EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b >> 1);
}
template <class ELFT>
ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, std::error_code &EC)
: ELFObjectFileBase(
getELFType(static_cast<endianness>(ELFT::TargetEndianness) ==
support::little,
ELFT::Is64Bits),
Object),
EF(Data.getBuffer(), EC) {}
template <class ELFT>
basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin_impl() const {
DataRefImpl Sym = toDRI(EF.getDotSymtabSec(), 0);
return basic_symbol_iterator(SymbolRef(Sym, this));
}
template <class ELFT>
basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end_impl() const {
const Elf_Shdr *SymTab = EF.getDotSymtabSec();
if (!SymTab)
return symbol_begin_impl();
DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
return basic_symbol_iterator(SymbolRef(Sym, this));
}
template <class ELFT>
elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_begin() const {
DataRefImpl Sym = toDRI(EF.getDotDynSymSec(), 0);
return symbol_iterator(SymbolRef(Sym, this));
}
template <class ELFT>
elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_end() const {
const Elf_Shdr *SymTab = EF.getDotDynSymSec();
DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
return basic_symbol_iterator(SymbolRef(Sym, this));
}
template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_begin() const {
return section_iterator(SectionRef(toDRI(EF.section_begin()), this));
}
template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_end() const {
return section_iterator(SectionRef(toDRI(EF.section_end()), this));
}
template <class ELFT>
StringRef ELFObjectFile<ELFT>::getLoadName() const {
Elf_Dyn_Iter DI = EF.dynamic_table_begin();
Elf_Dyn_Iter DE = EF.dynamic_table_end();
while (DI != DE && DI->getTag() != ELF::DT_SONAME)
++DI;
if (DI != DE)
return EF.getDynamicString(DI->getVal());
return "";
}
template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getBytesInAddress() const {
return ELFT::Is64Bits ? 8 : 4;
}
template <class ELFT>
StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
bool IsLittleEndian = ELFT::TargetEndianness == support::little;
switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
case ELF::ELFCLASS32:
switch (EF.getHeader()->e_machine) {
case ELF::EM_386:
return "ELF32-i386";
case ELF::EM_X86_64:
return "ELF32-x86-64";
case ELF::EM_ARM:
return (IsLittleEndian ? "ELF32-arm-little" : "ELF32-arm-big");
case ELF::EM_HEXAGON:
return "ELF32-hexagon";
case ELF::EM_MIPS:
return "ELF32-mips";
case ELF::EM_PPC:
return "ELF32-ppc";
case ELF::EM_SPARC:
case ELF::EM_SPARC32PLUS:
return "ELF32-sparc";
default:
return "ELF32-unknown";
}
case ELF::ELFCLASS64:
switch (EF.getHeader()->e_machine) {
case ELF::EM_386:
return "ELF64-i386";
case ELF::EM_X86_64:
return "ELF64-x86-64";
case ELF::EM_AARCH64:
return (IsLittleEndian ? "ELF64-aarch64-little" : "ELF64-aarch64-big");
case ELF::EM_PPC64:
return "ELF64-ppc64";
case ELF::EM_S390:
return "ELF64-s390";
case ELF::EM_SPARCV9:
return "ELF64-sparc";
case ELF::EM_MIPS:
return "ELF64-mips";
default:
return "ELF64-unknown";
}
default:
// FIXME: Proper error handling.
report_fatal_error("Invalid ELFCLASS!");
}
}
template <class ELFT>
unsigned ELFObjectFile<ELFT>::getArch() const {
bool IsLittleEndian = ELFT::TargetEndianness == support::little;
switch (EF.getHeader()->e_machine) {
case ELF::EM_386:
return Triple::x86;
case ELF::EM_X86_64:
return Triple::x86_64;
case ELF::EM_AARCH64:
return Triple::aarch64;
case ELF::EM_ARM:
return Triple::arm;
case ELF::EM_HEXAGON:
return Triple::hexagon;
case ELF::EM_MIPS:
switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
case ELF::ELFCLASS32:
return IsLittleEndian ? Triple::mipsel : Triple::mips;
case ELF::ELFCLASS64:
return IsLittleEndian ? Triple::mips64el : Triple::mips64;
default:
report_fatal_error("Invalid ELFCLASS!");
}
case ELF::EM_PPC:
return Triple::ppc;
case ELF::EM_PPC64:
return IsLittleEndian ? Triple::ppc64le : Triple::ppc64;
case ELF::EM_S390:
return Triple::systemz;
case ELF::EM_SPARC:
case ELF::EM_SPARC32PLUS:
return IsLittleEndian ? Triple::sparcel : Triple::sparc;
case ELF::EM_SPARCV9:
return Triple::sparcv9;
default:
return Triple::UnknownArch;
}
}
template <class ELFT>
ELFObjectFileBase::elf_symbol_iterator_range
ELFObjectFile<ELFT>::getDynamicSymbolIterators() const {
return make_range(dynamic_symbol_begin(), dynamic_symbol_end());
}
template <class ELFT> bool ELFObjectFile<ELFT>::isRelocatableObject() const {
return EF.getHeader()->e_type == ELF::ET_REL;
}
}
}
#endif
// File: repos/DirectXShaderCompiler/include/llvm/Object/SymbolSize.h
//===- SymbolSize.h ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_SYMBOLSIZE_H
#define LLVM_OBJECT_SYMBOLSIZE_H
#include "llvm/Object/ObjectFile.h"
namespace llvm {
namespace object {
std::vector<std::pair<SymbolRef, uint64_t>>
computeSymbolSizes(const ObjectFile &O);
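// Example (illustrative, not part of the original header): pairing each
// symbol of an ObjectFile "O" (hypothetical) with its computed size.
//
//   for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(O)) {
//     if (ErrorOr<StringRef> NameOrErr = P.first.getName())
//       outs() << *NameOrErr << ": " << P.second << "\n";
//   }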
}
} // namespace llvm
#endif
// File: repos/DirectXShaderCompiler/include/llvm/Object/ELFTypes.h
//===- ELFTypes.h - Endian specific types for ELF ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ELFTYPES_H
#define LLVM_OBJECT_ELFTYPES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorOr.h"
namespace llvm {
namespace object {
using support::endianness;
template <endianness target_endianness, bool is64Bits> struct ELFType {
static const endianness TargetEndianness = target_endianness;
static const bool Is64Bits = is64Bits;
};
typedef ELFType<support::little, false> ELF32LE;
typedef ELFType<support::big, false> ELF32BE;
typedef ELFType<support::little, true> ELF64LE;
typedef ELFType<support::big, true> ELF64BE;
// Use an alignment of 2 for the typedefs since that is the worst case for
// ELF files in archives.
// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
template <endianness target_endianness> struct ELFDataTypeTypedefHelperCommon {
typedef support::detail::packed_endian_specific_integral<
uint16_t, target_endianness, 2> Elf_Half;
typedef support::detail::packed_endian_specific_integral<
uint32_t, target_endianness, 2> Elf_Word;
typedef support::detail::packed_endian_specific_integral<
int32_t, target_endianness, 2> Elf_Sword;
typedef support::detail::packed_endian_specific_integral<
uint64_t, target_endianness, 2> Elf_Xword;
typedef support::detail::packed_endian_specific_integral<
int64_t, target_endianness, 2> Elf_Sxword;
};
template <class ELFT> struct ELFDataTypeTypedefHelper;
/// ELF 32bit types.
template <endianness TargetEndianness>
struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, false>>
: ELFDataTypeTypedefHelperCommon<TargetEndianness> {
typedef uint32_t value_type;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Addr;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Off;
};
/// ELF 64bit types.
template <endianness TargetEndianness>
struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, true>>
: ELFDataTypeTypedefHelperCommon<TargetEndianness> {
typedef uint64_t value_type;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Addr;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Off;
};
// I really don't like doing this, but the alternative is copypasta.
#define LLVM_ELF_IMPORT_TYPES(E, W) \
typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Addr Elf_Addr; \
typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Off Elf_Off; \
typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Half Elf_Half; \
typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Word Elf_Word; \
typedef \
typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sword Elf_Sword; \
typedef \
typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Xword Elf_Xword; \
typedef \
typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sxword Elf_Sxword;
#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) \
LLVM_ELF_IMPORT_TYPES(ELFT::TargetEndianness, ELFT::Is64Bits)
// Section header.
template <class ELFT> struct Elf_Shdr_Base;
template <endianness TargetEndianness>
struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Word sh_name; // Section name (index into string table)
Elf_Word sh_type; // Section type (SHT_*)
Elf_Word sh_flags; // Section flags (SHF_*)
Elf_Addr sh_addr; // Address where section is to be loaded
Elf_Off sh_offset; // File offset of section data, in bytes
Elf_Word sh_size; // Size of section, in bytes
Elf_Word sh_link; // Section type-specific header table index link
Elf_Word sh_info; // Section type-specific extra information
Elf_Word sh_addralign; // Section address alignment
Elf_Word sh_entsize; // Size of records contained within the section
};
template <endianness TargetEndianness>
struct Elf_Shdr_Base<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Word sh_name; // Section name (index into string table)
Elf_Word sh_type; // Section type (SHT_*)
Elf_Xword sh_flags; // Section flags (SHF_*)
Elf_Addr sh_addr; // Address where section is to be loaded
Elf_Off sh_offset; // File offset of section data, in bytes
Elf_Xword sh_size; // Size of section, in bytes
Elf_Word sh_link; // Section type-specific header table index link
Elf_Word sh_info; // Section type-specific extra information
Elf_Xword sh_addralign; // Section address alignment
Elf_Xword sh_entsize; // Size of records contained within the section
};
template <class ELFT>
struct Elf_Shdr_Impl : Elf_Shdr_Base<ELFT> {
using Elf_Shdr_Base<ELFT>::sh_entsize;
using Elf_Shdr_Base<ELFT>::sh_size;
/// @brief Get the number of entities this section contains if it has any.
unsigned getEntityCount() const {
if (sh_entsize == 0)
return 0;
return sh_size / sh_entsize;
}
};
template <class ELFT> struct Elf_Sym_Base;
template <endianness TargetEndianness>
struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Word st_name; // Symbol name (index into string table)
Elf_Addr st_value; // Value or address associated with the symbol
Elf_Word st_size; // Size of the symbol
unsigned char st_info; // Symbol's type and binding attributes
unsigned char st_other; // Symbol visibility (STV_*) in low bits; rest reserved
Elf_Half st_shndx; // Which section (header table index) it's defined in
};
template <endianness TargetEndianness>
struct Elf_Sym_Base<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Word st_name; // Symbol name (index into string table)
unsigned char st_info; // Symbol's type and binding attributes
unsigned char st_other; // Symbol visibility (STV_*) in low bits; rest reserved
Elf_Half st_shndx; // Which section (header table index) it's defined in
Elf_Addr st_value; // Value or address associated with the symbol
Elf_Xword st_size; // Size of the symbol
};
template <class ELFT>
struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
using Elf_Sym_Base<ELFT>::st_info;
using Elf_Sym_Base<ELFT>::st_shndx;
using Elf_Sym_Base<ELFT>::st_other;
using Elf_Sym_Base<ELFT>::st_value;
// These accessors and mutators correspond to the ELF32_ST_BIND,
// ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
unsigned char getBinding() const { return st_info >> 4; }
unsigned char getType() const { return st_info & 0x0f; }
uint64_t getValue() const { return st_value; }
void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
void setBindingAndType(unsigned char b, unsigned char t) {
st_info = (b << 4) + (t & 0x0f);
}
/// Access to the STV_xxx flag stored in the first two bits of st_other.
/// STV_DEFAULT: 0
/// STV_INTERNAL: 1
/// STV_HIDDEN: 2
/// STV_PROTECTED: 3
unsigned char getVisibility() const { return st_other & 0x3; }
void setVisibility(unsigned char v) {
assert(v < 4 && "Invalid value for visibility");
st_other = (st_other & ~0x3) | v;
}
bool isAbsolute() const { return st_shndx == ELF::SHN_ABS; }
bool isCommon() const {
return getType() == ELF::STT_COMMON || st_shndx == ELF::SHN_COMMON;
}
bool isDefined() const { return !isUndefined(); }
bool isProcessorSpecific() const {
return st_shndx >= ELF::SHN_LOPROC && st_shndx <= ELF::SHN_HIPROC;
}
bool isOSSpecific() const {
return st_shndx >= ELF::SHN_LOOS && st_shndx <= ELF::SHN_HIOS;
}
bool isReserved() const {
// ELF::SHN_HIRESERVE is 0xffff so st_shndx <= ELF::SHN_HIRESERVE is always
// true and some compilers warn about it.
return st_shndx >= ELF::SHN_LORESERVE;
}
bool isUndefined() const { return st_shndx == ELF::SHN_UNDEF; }
bool isExternal() const {
return getBinding() != ELF::STB_LOCAL;
}
ErrorOr<StringRef> getName(StringRef StrTab) const;
};
template <class ELFT>
ErrorOr<StringRef> Elf_Sym_Impl<ELFT>::getName(StringRef StrTab) const {
uint32_t Offset = this->st_name;
if (Offset >= StrTab.size())
return object_error::parse_failed;
return StringRef(StrTab.data() + Offset);
}
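// Editor's note: a minimal usage sketch (an illustration assumed by the
// editor, not part of the upstream header) of the st_info packing above. A
// global function symbol stores (STB_GLOBAL << 4) | STT_FUNC.
template <class ELFT>
inline void exampleMarkGlobalFunction(Elf_Sym_Impl<ELFT> &Sym) {
Sym.setBindingAndType(ELF::STB_GLOBAL, ELF::STT_FUNC);
assert(Sym.getBinding() == ELF::STB_GLOBAL && "Binding lives in high nibble");
assert(Sym.getType() == ELF::STT_FUNC && "Type lives in low nibble");
}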
/// Elf_Versym: This is the structure of entries in the SHT_GNU_versym section
/// (.gnu.version). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Versym_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
};
template <class ELFT> struct Elf_Verdaux_Impl;
/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verdef_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
Elf_Half vd_flags; // Bitwise flags (VER_DEF_*)
Elf_Half vd_ndx; // Version index, used in .gnu.version entries
Elf_Half vd_cnt; // Number of Verdaux entries
Elf_Word vd_hash; // Hash of name
Elf_Word vd_aux; // Offset to the first Verdaux entry (in bytes)
Elf_Word vd_next; // Offset to the next Verdef entry (in bytes)
/// Get the first Verdaux entry for this Verdef.
const Elf_Verdaux *getAux() const {
return reinterpret_cast<const Elf_Verdaux *>((const char *)this + vd_aux);
}
};
/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verdaux_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Word vda_name; // Version name (offset in string table)
Elf_Word vda_next; // Offset to next Verdaux entry (in bytes)
};
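// Editor's note: a sketch (editor's assumption, not upstream API) of how the
// byte-offset links above are walked: vd_aux and vda_next are offsets from the
// current record, and a vda_next of 0 terminates the Verdaux chain.
template <class ELFT>
const Elf_Verdaux_Impl<ELFT> *exampleNextVerdaux(const Elf_Verdaux_Impl<ELFT> *A) {
if (A->vda_next == 0)
return nullptr; // Last auxiliary entry for this Verdef.
return reinterpret_cast<const Elf_Verdaux_Impl<ELFT> *>(
reinterpret_cast<const char *>(A) + A->vda_next);
}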
/// Elf_Verneed: This is the structure of entries in the SHT_GNU_verneed
/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verneed_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Half vn_version; // Version of this structure (e.g. VER_NEED_CURRENT)
Elf_Half vn_cnt; // Number of associated Vernaux entries
Elf_Word vn_file; // Library name (string table offset)
Elf_Word vn_aux; // Offset to first Vernaux entry (in bytes)
Elf_Word vn_next; // Offset to next Verneed entry (in bytes)
};
/// Elf_Vernaux: This is the structure of auxiliary data in SHT_GNU_verneed
/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Vernaux_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Word vna_hash; // Hash of dependency name
Elf_Half vna_flags; // Bitwise Flags (VER_FLAG_*)
Elf_Half vna_other; // Version index, used in .gnu.version entries
Elf_Word vna_name; // Dependency name
Elf_Word vna_next; // Offset to next Vernaux entry (in bytes)
};
/// Elf_Dyn_Base: This structure matches the form of entries in the dynamic
/// table section (.dynamic).
template <class ELFT> struct Elf_Dyn_Base;
template <endianness TargetEndianness>
struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Sword d_tag;
union {
Elf_Word d_val;
Elf_Addr d_ptr;
} d_un;
};
template <endianness TargetEndianness>
struct Elf_Dyn_Base<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Sxword d_tag;
union {
Elf_Xword d_val;
Elf_Addr d_ptr;
} d_un;
};
/// Elf_Dyn_Impl: This inherits from Elf_Dyn_Base, adding getters and setters.
template <class ELFT>
struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
using Elf_Dyn_Base<ELFT>::d_tag;
using Elf_Dyn_Base<ELFT>::d_un;
int64_t getTag() const { return d_tag; }
uint64_t getVal() const { return d_un.d_val; }
uint64_t getPtr() const { return d_un.d_ptr; }
};
// Elf_Rel: Elf Relocation
template <class ELFT, bool isRela> struct Elf_Rel_Impl;
template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
Elf_Word r_info; // Symbol table index and type of relocation to apply
uint32_t getRInfo(bool isMips64EL) const {
assert(!isMips64EL);
return r_info;
}
void setRInfo(uint32_t R, bool IsMips64EL) {
assert(!IsMips64EL);
r_info = R;
}
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
uint32_t getSymbol(bool isMips64EL) const {
return this->getRInfo(isMips64EL) >> 8;
}
unsigned char getType(bool isMips64EL) const {
return (unsigned char)(this->getRInfo(isMips64EL) & 0x0ff);
}
void setSymbol(uint32_t s, bool IsMips64EL) {
setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
}
void setType(unsigned char t, bool IsMips64EL) {
setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
}
void setSymbolAndType(uint32_t s, unsigned char t, bool IsMips64EL) {
this->setRInfo((s << 8) + t, IsMips64EL);
}
};
template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, true>
: public Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Sword r_addend; // Compute value for relocatable field by adding this
};
template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
Elf_Xword r_info; // Symbol table index and type of relocation to apply
uint64_t getRInfo(bool isMips64EL) const {
uint64_t t = r_info;
if (!isMips64EL)
return t;
// Mips64 little endian has a "special" encoding of r_info. Instead of one
// 64 bit little endian number, it is a little endian 32 bit number followed
// by a 32 bit big endian number.
return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
}
void setRInfo(uint64_t R, bool IsMips64EL) {
if (IsMips64EL)
r_info = (R >> 32) | ((R & 0xff000000) << 8) | ((R & 0x00ff0000) << 24) |
((R & 0x0000ff00) << 40) | ((R & 0x000000ff) << 56);
else
r_info = R;
}
// These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
// and ELF64_R_INFO macros defined in the ELF specification:
uint32_t getSymbol(bool isMips64EL) const {
return (uint32_t)(this->getRInfo(isMips64EL) >> 32);
}
uint32_t getType(bool isMips64EL) const {
return (uint32_t)(this->getRInfo(isMips64EL) & 0xffffffffL);
}
void setSymbol(uint32_t s, bool IsMips64EL) {
setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
}
void setType(uint32_t t, bool IsMips64EL) {
setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
}
void setSymbolAndType(uint32_t s, uint32_t t, bool IsMips64EL) {
this->setRInfo(((uint64_t)s << 32) + (t & 0xffffffffL), IsMips64EL);
}
};
template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, true>
: public Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
};
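// Editor's note: a self-contained, compile-time check (added for illustration;
// not part of the upstream header) of the Mips64 EL swizzle in getRInfo above.
// On disk, symbol index 5 with r_type 0x12 reads as the LE value
// 0x1200000000000005; the canonical form is (sym << 32) | type.
namespace mips64el_rinfo_example {
constexpr uint64_t swizzle(uint64_t t) {
return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
}
static_assert(swizzle(0x1200000000000005ULL) == 0x0000000500000012ULL,
"Mips64 EL r_info decodes to (sym << 32) | type");
}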
template <class ELFT>
struct Elf_Ehdr_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
Elf_Half e_type; // Type of file (see ET_*)
Elf_Half e_machine; // Required architecture for this file (see EM_*)
Elf_Word e_version; // Must be equal to 1
Elf_Addr e_entry; // Address to jump to in order to start program
Elf_Off e_phoff; // Program header table's file offset, in bytes
Elf_Off e_shoff; // Section header table's file offset, in bytes
Elf_Word e_flags; // Processor-specific flags
Elf_Half e_ehsize; // Size of ELF header, in bytes
Elf_Half e_phentsize; // Size of an entry in the program header table
Elf_Half e_phnum; // Number of entries in the program header table
Elf_Half e_shentsize; // Size of an entry in the section header table
Elf_Half e_shnum; // Number of entries in the section header table
Elf_Half e_shstrndx; // Section header table index of section name string table
bool checkMagic() const {
return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
}
unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
};
template <class ELFT> struct Elf_Phdr_Impl;
template <endianness TargetEndianness>
struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Word p_type; // Type of segment
Elf_Off p_offset; // FileOffset where segment is located, in bytes
Elf_Addr p_vaddr; // Virtual Address of beginning of segment
Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
Elf_Word p_flags; // Segment flags
Elf_Word p_align; // Segment alignment constraint
};
template <endianness TargetEndianness>
struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Word p_type; // Type of segment
Elf_Word p_flags; // Segment flags
Elf_Off p_offset; // FileOffset where segment is located, in bytes
Elf_Addr p_vaddr; // Virtual Address of beginning of segment
Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
Elf_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
Elf_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
Elf_Xword p_align; // Segment alignment constraint
};
// ELFT needed for endianness.
template <class ELFT>
struct Elf_Hash_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Word nbucket;
Elf_Word nchain;
ArrayRef<Elf_Word> buckets() const {
return ArrayRef<Elf_Word>(&nbucket + 2, &nbucket + 2 + nbucket);
}
ArrayRef<Elf_Word> chains() const {
return ArrayRef<Elf_Word>(&nbucket + 2 + nbucket,
&nbucket + 2 + nbucket + nchain);
}
};
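// Editor's note: a sketch (editor's assumption, not upstream API) of the
// classic SysV hash lookup this layout supports: bucket[H % nbucket] yields
// the first candidate symbol index, and chains() links further candidates
// until STN_UNDEF (0).
template <class ELFT>
uint32_t exampleFirstHashCandidate(const Elf_Hash_Impl<ELFT> &Table,
uint32_t SymbolHash) {
return Table.buckets()[SymbolHash % Table.nbucket];
}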
// MIPS .reginfo section
template <class ELFT>
struct Elf_Mips_RegInfo;
template <llvm::support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Word ri_gprmask; // bit-mask of used general registers
Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
Elf_Addr ri_gp_value; // gp register value
};
template <llvm::support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Word ri_gprmask; // bit-mask of used general registers
Elf_Word ri_pad; // unused padding field
Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
Elf_Addr ri_gp_value; // gp register value
};
// .MIPS.options section
template <class ELFT> struct Elf_Mips_Options {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
uint8_t kind; // Determines interpretation of variable part of descriptor
uint8_t size; // Byte size of descriptor, including this header
Elf_Half section; // Section header index of section affected,
// or 0 for global options
Elf_Word info; // Kind-specific information
const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
assert(kind == llvm::ELF::ODK_REGINFO);
return *reinterpret_cast<const Elf_Mips_RegInfo<ELFT> *>(
(const uint8_t *)this + sizeof(Elf_Mips_Options));
}
};
// .MIPS.abiflags section content
template <class ELFT> struct Elf_Mips_ABIFlags {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Elf_Half version; // Version of the structure
uint8_t isa_level; // ISA level: 1-5, 32, and 64
uint8_t isa_rev; // ISA revision (0 for MIPS I - MIPS V)
uint8_t gpr_size; // General purpose registers size
uint8_t cpr1_size; // Co-processor 1 registers size
uint8_t cpr2_size; // Co-processor 2 registers size
uint8_t fp_abi; // Floating-point ABI flag
Elf_Word isa_ext; // Processor-specific extension
Elf_Word ases; // ASEs flags
Elf_Word flags1; // General flags
Elf_Word flags2; // General flags
};
} // end namespace object.
} // end namespace llvm.
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/Archive.h | //===- Archive.h - ar archive file format -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the ar archive file format class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ARCHIVE_H
#define LLVM_OBJECT_ARCHIVE_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
namespace object {
struct ArchiveMemberHeader {
char Name[16];
char LastModified[12];
char UID[6];
char GID[6];
char AccessMode[8];
char Size[10]; ///< Size of data, not including header or padding.
char Terminator[2];
/// Get the name without looking up long names.
llvm::StringRef getName() const;
/// Members are not larger than 4GB.
uint32_t getSize() const;
sys::fs::perms getAccessMode() const;
sys::TimeValue getLastModified() const;
llvm::StringRef getRawLastModified() const {
return StringRef(LastModified, sizeof(LastModified)).rtrim(" ");
}
unsigned getUID() const;
unsigned getGID() const;
};
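// Editor's note: for orientation (illustrative bytes only, not part of the
// upstream header), the fields above mirror the fixed-width ASCII ar member
// header, e.g. "foo.o/          1444169840  1000  1000  100644  1234      `\n"
// laid out as Name(16) LastModified(12) UID(6) GID(6) AccessMode(8) Size(10)
// Terminator(2).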
class Archive : public Binary {
virtual void anchor();
public:
class Child {
const Archive *Parent;
/// \brief Includes header but not padding byte.
StringRef Data;
/// \brief Offset from Data to the start of the file.
uint16_t StartOfFile;
const ArchiveMemberHeader *getHeader() const {
return reinterpret_cast<const ArchiveMemberHeader *>(Data.data());
}
public:
Child(const Archive *Parent, const char *Start);
bool operator ==(const Child &other) const {
assert(Parent == other.Parent);
return Data.begin() == other.Data.begin();
}
bool operator <(const Child &other) const {
return Data.begin() < other.Data.begin();
}
Child getNext() const;
ErrorOr<StringRef> getName() const;
StringRef getRawName() const { return getHeader()->getName(); }
sys::TimeValue getLastModified() const {
return getHeader()->getLastModified();
}
StringRef getRawLastModified() const {
return getHeader()->getRawLastModified();
}
unsigned getUID() const { return getHeader()->getUID(); }
unsigned getGID() const { return getHeader()->getGID(); }
sys::fs::perms getAccessMode() const {
return getHeader()->getAccessMode();
}
/// \return the size of the archive member without the header or padding.
uint64_t getSize() const;
/// \return the size in the archive header for this member.
uint64_t getRawSize() const;
ErrorOr<StringRef> getBuffer() const;
uint64_t getChildOffset() const;
ErrorOr<MemoryBufferRef> getMemoryBufferRef() const;
ErrorOr<std::unique_ptr<Binary>>
getAsBinary(LLVMContext *Context = nullptr) const;
};
class child_iterator {
Child child;
public:
child_iterator() : child(Child(nullptr, nullptr)) {}
child_iterator(const Child &c) : child(c) {}
const Child *operator->() const { return &child; }
const Child &operator*() const { return child; }
bool operator==(const child_iterator &other) const {
return child == other.child;
}
bool operator!=(const child_iterator &other) const {
return !(*this == other);
}
bool operator<(const child_iterator &other) const {
return child < other.child;
}
child_iterator &operator++() { // Preincrement
child = child.getNext();
return *this;
}
};
class Symbol {
const Archive *Parent;
uint32_t SymbolIndex;
uint32_t StringIndex; // Extra index to the string.
public:
bool operator ==(const Symbol &other) const {
return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
}
Symbol(const Archive *p, uint32_t symi, uint32_t stri)
: Parent(p)
, SymbolIndex(symi)
, StringIndex(stri) {}
StringRef getName() const;
ErrorOr<child_iterator> getMember() const;
Symbol getNext() const;
};
class symbol_iterator {
Symbol symbol;
public:
symbol_iterator(const Symbol &s) : symbol(s) {}
const Symbol *operator->() const { return &symbol; }
const Symbol &operator*() const { return symbol; }
bool operator==(const symbol_iterator &other) const {
return symbol == other.symbol;
}
bool operator!=(const symbol_iterator &other) const {
return !(*this == other);
}
symbol_iterator& operator++() { // Preincrement
symbol = symbol.getNext();
return *this;
}
};
Archive(MemoryBufferRef Source, std::error_code &EC);
static ErrorOr<std::unique_ptr<Archive>> create(MemoryBufferRef Source);
enum Kind {
K_GNU,
K_MIPS64,
K_BSD,
K_COFF
};
Kind kind() const { return (Kind)Format; }
bool isThin() const { return IsThin; }
child_iterator child_begin(bool SkipInternal = true) const;
child_iterator child_end() const;
iterator_range<child_iterator> children(bool SkipInternal = true) const {
return iterator_range<child_iterator>(child_begin(SkipInternal),
child_end());
}
symbol_iterator symbol_begin() const;
symbol_iterator symbol_end() const;
iterator_range<symbol_iterator> symbols() const {
return iterator_range<symbol_iterator>(symbol_begin(), symbol_end());
}
// Cast methods.
static inline bool classof(Binary const *v) {
return v->isArchive();
}
// Check if a symbol is in the archive.
child_iterator findSym(StringRef name) const;
bool hasSymbolTable() const;
child_iterator getSymbolTableChild() const { return SymbolTable; }
StringRef getSymbolTable() const {
// We know that the symbol table is not an external file,
// so we just assert there is no error.
return *SymbolTable->getBuffer();
}
uint32_t getNumberOfSymbols() const;
private:
child_iterator SymbolTable;
child_iterator StringTable;
child_iterator FirstRegular;
unsigned Format : 2;
unsigned IsThin : 1;
mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
};
}
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/ELF.h | //===- ELF.h - ELF object file implementation -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the ELFFile template class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ELF_H
#define LLVM_OBJECT_ELF_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <limits>
#include <utility>
namespace llvm {
namespace object {
StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
// Subclasses of ELFFile may need this for template instantiation
inline std::pair<unsigned char, unsigned char>
getElfArchType(StringRef Object) {
if (Object.size() < ELF::EI_NIDENT)
return std::make_pair((uint8_t)ELF::ELFCLASSNONE,
(uint8_t)ELF::ELFDATANONE);
return std::make_pair((uint8_t)Object[ELF::EI_CLASS],
(uint8_t)Object[ELF::EI_DATA]);
}
template <class ELFT>
class ELFFile {
public:
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef typename std::conditional<ELFT::Is64Bits,
uint64_t, uint32_t>::type uintX_t;
/// \brief Iterate over constant sized entities.
template <class EntT>
class ELFEntityIterator {
public:
typedef ptrdiff_t difference_type;
typedef EntT value_type;
typedef std::forward_iterator_tag iterator_category;
typedef value_type &reference;
typedef value_type *pointer;
/// \brief Default construct iterator.
ELFEntityIterator() : EntitySize(0), Current(nullptr) {}
ELFEntityIterator(uintX_t EntSize, const char *Start)
: EntitySize(EntSize), Current(Start) {}
reference operator *() {
assert(Current && "Attempted to dereference an invalid iterator!");
return *reinterpret_cast<pointer>(Current);
}
pointer operator ->() {
assert(Current && "Attempted to dereference an invalid iterator!");
return reinterpret_cast<pointer>(Current);
}
bool operator ==(const ELFEntityIterator &Other) {
return Current == Other.Current;
}
bool operator !=(const ELFEntityIterator &Other) {
return !(*this == Other);
}
ELFEntityIterator &operator ++() {
assert(Current && "Attempted to increment an invalid iterator!");
Current += EntitySize;
return *this;
}
ELFEntityIterator &operator+(difference_type n) {
assert(Current && "Attempted to increment an invalid iterator!");
Current += (n * EntitySize);
return *this;
}
ELFEntityIterator &operator-(difference_type n) {
assert(Current && "Attempted to subtract an invalid iterator!");
Current -= (n * EntitySize);
return *this;
}
ELFEntityIterator operator ++(int) {
ELFEntityIterator Tmp = *this;
++*this;
return Tmp;
}
difference_type operator -(const ELFEntityIterator &Other) const {
assert(EntitySize == Other.EntitySize &&
"Subtracting iterators of different EntitySize!");
return (Current - Other.Current) / EntitySize;
}
const char *get() const { return Current; }
uintX_t getEntSize() const { return EntitySize; }
private:
uintX_t EntitySize;
const char *Current;
};
typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
typedef Elf_Sym_Impl<ELFT> Elf_Sym;
typedef Elf_Dyn_Impl<ELFT> Elf_Dyn;
typedef Elf_Phdr_Impl<ELFT> Elf_Phdr;
typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
typedef Elf_Verdef_Impl<ELFT> Elf_Verdef;
typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
typedef Elf_Verneed_Impl<ELFT> Elf_Verneed;
typedef Elf_Vernaux_Impl<ELFT> Elf_Vernaux;
typedef Elf_Versym_Impl<ELFT> Elf_Versym;
typedef Elf_Hash_Impl<ELFT> Elf_Hash;
typedef ELFEntityIterator<const Elf_Dyn> Elf_Dyn_Iter;
typedef iterator_range<Elf_Dyn_Iter> Elf_Dyn_Range;
typedef ELFEntityIterator<const Elf_Rela> Elf_Rela_Iter;
typedef ELFEntityIterator<const Elf_Rel> Elf_Rel_Iter;
typedef iterator_range<const Elf_Shdr *> Elf_Shdr_Range;
/// \brief Archive files are 2 byte aligned, so we need this for
/// PointerIntPair to work.
template <typename T>
class ArchivePointerTypeTraits {
public:
static inline const void *getAsVoidPointer(T *P) { return P; }
static inline T *getFromVoidPointer(const void *P) {
return static_cast<T *>(P);
}
enum { NumLowBitsAvailable = 1 };
};
typedef iterator_range<const Elf_Sym *> Elf_Sym_Range;
private:
typedef SmallVector<const Elf_Shdr *, 2> Sections_t;
typedef DenseMap<unsigned, unsigned> IndexMap_t;
StringRef Buf;
const uint8_t *base() const {
return reinterpret_cast<const uint8_t *>(Buf.data());
}
const Elf_Ehdr *Header;
const Elf_Shdr *SectionHeaderTable = nullptr;
StringRef DotShstrtab; // Section header string table.
StringRef DotStrtab; // Symbol string table (.strtab).
const Elf_Shdr *dot_symtab_sec = nullptr; // Symbol table section.
const Elf_Shdr *DotDynSymSec = nullptr; // Dynamic symbol table section.
const Elf_Hash *HashTable = nullptr;
const Elf_Shdr *SymbolTableSectionHeaderIndex = nullptr;
DenseMap<const Elf_Sym *, ELF::Elf64_Word> ExtendedSymbolTable;
const Elf_Shdr *dot_gnu_version_sec = nullptr; // .gnu.version
const Elf_Shdr *dot_gnu_version_r_sec = nullptr; // .gnu.version_r
const Elf_Shdr *dot_gnu_version_d_sec = nullptr; // .gnu.version_d
/// \brief Represents a region described by entries in the .dynamic table.
struct DynRegionInfo {
DynRegionInfo() : Addr(nullptr), Size(0), EntSize(0) {}
/// \brief Address in current address space.
const void *Addr;
/// \brief Size in bytes of the region.
uintX_t Size;
/// \brief Size of each entity in the region.
uintX_t EntSize;
};
DynRegionInfo DynamicRegion;
DynRegionInfo DynHashRegion;
DynRegionInfo DynStrRegion;
DynRegionInfo DynRelaRegion;
// Pointer to SONAME entry in dynamic string table
// This is set the first time getLoadName is called.
mutable const char *dt_soname = nullptr;
// Records for each version index the corresponding Verdef or Vernaux entry.
// This is filled the first time LoadVersionMap() is called.
class VersionMapEntry : public PointerIntPair<const void*, 1> {
public:
// If the integer is 0, this is an Elf_Verdef*.
// If the integer is 1, this is an Elf_Vernaux*.
VersionMapEntry() : PointerIntPair<const void*, 1>(nullptr, 0) { }
VersionMapEntry(const Elf_Verdef *verdef)
: PointerIntPair<const void*, 1>(verdef, 0) { }
VersionMapEntry(const Elf_Vernaux *vernaux)
: PointerIntPair<const void*, 1>(vernaux, 1) { }
bool isNull() const { return getPointer() == nullptr; }
bool isVerdef() const { return !isNull() && getInt() == 0; }
bool isVernaux() const { return !isNull() && getInt() == 1; }
const Elf_Verdef *getVerdef() const {
return isVerdef() ? (const Elf_Verdef*)getPointer() : nullptr;
}
const Elf_Vernaux *getVernaux() const {
return isVernaux() ? (const Elf_Vernaux*)getPointer() : nullptr;
}
};
mutable SmallVector<VersionMapEntry, 16> VersionMap;
void LoadVersionDefs(const Elf_Shdr *sec) const;
void LoadVersionNeeds(const Elf_Shdr *sec) const;
void LoadVersionMap() const;
void scanDynamicTable();
public:
template<typename T>
const T *getEntry(uint32_t Section, uint32_t Entry) const;
template <typename T>
const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
const Elf_Shdr *getDotSymtabSec() const { return dot_symtab_sec; }
const Elf_Shdr *getDotDynSymSec() const { return DotDynSymSec; }
const Elf_Hash *getHashTable() const { return HashTable; }
ErrorOr<StringRef> getStringTable(const Elf_Shdr *Section) const;
const char *getDynamicString(uintX_t Offset) const;
ErrorOr<StringRef> getSymbolVersion(const Elf_Shdr *section,
const Elf_Sym *Symb,
bool &IsDefault) const;
void VerifyStrTab(const Elf_Shdr *sh) const;
StringRef getRelocationTypeName(uint32_t Type) const;
void getRelocationTypeName(uint32_t Type,
SmallVectorImpl<char> &Result) const;
/// \brief Get the symbol table section and symbol for a given relocation.
template <class RelT>
std::pair<const Elf_Shdr *, const Elf_Sym *>
getRelocationSymbol(const Elf_Shdr *RelSec, const RelT *Rel) const;
ELFFile(StringRef Object, std::error_code &EC);
bool isMipsELF64() const {
return Header->e_machine == ELF::EM_MIPS &&
Header->getFileClass() == ELF::ELFCLASS64;
}
bool isMips64EL() const {
return Header->e_machine == ELF::EM_MIPS &&
Header->getFileClass() == ELF::ELFCLASS64 &&
Header->getDataEncoding() == ELF::ELFDATA2LSB;
}
const Elf_Shdr *section_begin() const;
const Elf_Shdr *section_end() const;
Elf_Shdr_Range sections() const {
return make_range(section_begin(), section_end());
}
const Elf_Sym *symbol_begin() const;
const Elf_Sym *symbol_end() const;
Elf_Sym_Range symbols() const {
return make_range(symbol_begin(), symbol_end());
}
Elf_Dyn_Iter dynamic_table_begin() const;
/// \param NULLEnd use one past the first DT_NULL entry as the end instead of
/// the section size.
Elf_Dyn_Iter dynamic_table_end(bool NULLEnd = false) const;
Elf_Dyn_Range dynamic_table(bool NULLEnd = false) const {
return make_range(dynamic_table_begin(), dynamic_table_end(NULLEnd));
}
const Elf_Sym *dynamic_symbol_begin() const {
if (!DotDynSymSec)
return nullptr;
if (DotDynSymSec->sh_entsize != sizeof(Elf_Sym))
report_fatal_error("Invalid symbol size");
return reinterpret_cast<const Elf_Sym *>(base() + DotDynSymSec->sh_offset);
}
const Elf_Sym *dynamic_symbol_end() const {
if (!DotDynSymSec)
return nullptr;
return reinterpret_cast<const Elf_Sym *>(base() + DotDynSymSec->sh_offset +
DotDynSymSec->sh_size);
}
Elf_Sym_Range dynamic_symbols() const {
return make_range(dynamic_symbol_begin(), dynamic_symbol_end());
}
Elf_Rela_Iter dyn_rela_begin() const {
if (DynRelaRegion.Addr)
return Elf_Rela_Iter(DynRelaRegion.EntSize,
(const char *)DynRelaRegion.Addr);
return Elf_Rela_Iter(0, nullptr);
}
Elf_Rela_Iter dyn_rela_end() const {
if (DynRelaRegion.Addr)
return Elf_Rela_Iter(
DynRelaRegion.EntSize,
(const char *)DynRelaRegion.Addr + DynRelaRegion.Size);
return Elf_Rela_Iter(0, nullptr);
}
Elf_Rela_Iter rela_begin(const Elf_Shdr *sec) const {
return Elf_Rela_Iter(sec->sh_entsize,
(const char *)(base() + sec->sh_offset));
}
Elf_Rela_Iter rela_end(const Elf_Shdr *sec) const {
return Elf_Rela_Iter(
sec->sh_entsize,
(const char *)(base() + sec->sh_offset + sec->sh_size));
}
Elf_Rel_Iter rel_begin(const Elf_Shdr *sec) const {
return Elf_Rel_Iter(sec->sh_entsize,
(const char *)(base() + sec->sh_offset));
}
Elf_Rel_Iter rel_end(const Elf_Shdr *sec) const {
return Elf_Rel_Iter(sec->sh_entsize,
(const char *)(base() + sec->sh_offset + sec->sh_size));
}
/// \brief Iterate over program header table.
typedef ELFEntityIterator<const Elf_Phdr> Elf_Phdr_Iter;
Elf_Phdr_Iter program_header_begin() const {
return Elf_Phdr_Iter(Header->e_phentsize,
(const char*)base() + Header->e_phoff);
}
Elf_Phdr_Iter program_header_end() const {
return Elf_Phdr_Iter(Header->e_phentsize,
(const char*)base() +
Header->e_phoff +
(Header->e_phnum * Header->e_phentsize));
}
uint64_t getNumSections() const;
uintX_t getStringTableIndex() const;
ELF::Elf64_Word getExtendedSymbolTableIndex(const Elf_Sym *symb) const;
const Elf_Ehdr *getHeader() const { return Header; }
ErrorOr<const Elf_Shdr *> getSection(const Elf_Sym *symb) const;
ErrorOr<const Elf_Shdr *> getSection(uint32_t Index) const;
const Elf_Sym *getSymbol(uint32_t index) const;
ErrorOr<StringRef> getStaticSymbolName(const Elf_Sym *Symb) const;
ErrorOr<StringRef> getDynamicSymbolName(const Elf_Sym *Symb) const;
ErrorOr<StringRef> getSymbolName(const Elf_Sym *Symb, bool IsDynamic) const;
ErrorOr<StringRef> getSectionName(const Elf_Shdr *Section) const;
ErrorOr<ArrayRef<uint8_t> > getSectionContents(const Elf_Shdr *Sec) const;
StringRef getLoadName() const;
};
typedef ELFFile<ELFType<support::little, false>> ELF32LEFile;
typedef ELFFile<ELFType<support::little, true>> ELF64LEFile;
typedef ELFFile<ELFType<support::big, false>> ELF32BEFile;
typedef ELFFile<ELFType<support::big, true>> ELF64BEFile;
// Iterate through the version definitions, and place each Elf_Verdef
// in the VersionMap according to its index.
template <class ELFT>
void ELFFile<ELFT>::LoadVersionDefs(const Elf_Shdr *sec) const {
unsigned vd_size = sec->sh_size; // Size of section in bytes
unsigned vd_count = sec->sh_info; // Number of Verdef entries
const char *sec_start = (const char*)base() + sec->sh_offset;
const char *sec_end = sec_start + vd_size;
// The first Verdef entry is at the start of the section.
const char *p = sec_start;
for (unsigned i = 0; i < vd_count; i++) {
if (p + sizeof(Elf_Verdef) > sec_end)
report_fatal_error("Section ended unexpectedly while scanning "
"version definitions.");
const Elf_Verdef *vd = reinterpret_cast<const Elf_Verdef *>(p);
if (vd->vd_version != ELF::VER_DEF_CURRENT)
report_fatal_error("Unexpected verdef version");
size_t index = vd->vd_ndx & ELF::VERSYM_VERSION;
if (index >= VersionMap.size())
VersionMap.resize(index + 1);
VersionMap[index] = VersionMapEntry(vd);
p += vd->vd_next;
}
}
// Iterate through the versions needed section, and place each Elf_Vernaux
// in the VersionMap according to its index.
template <class ELFT>
void ELFFile<ELFT>::LoadVersionNeeds(const Elf_Shdr *sec) const {
unsigned vn_size = sec->sh_size; // Size of section in bytes
unsigned vn_count = sec->sh_info; // Number of Verneed entries
const char *sec_start = (const char *)base() + sec->sh_offset;
const char *sec_end = sec_start + vn_size;
// The first Verneed entry is at the start of the section.
const char *p = sec_start;
for (unsigned i = 0; i < vn_count; i++) {
if (p + sizeof(Elf_Verneed) > sec_end)
report_fatal_error("Section ended unexpectedly while scanning "
"version needed records.");
const Elf_Verneed *vn = reinterpret_cast<const Elf_Verneed *>(p);
if (vn->vn_version != ELF::VER_NEED_CURRENT)
report_fatal_error("Unexpected verneed version");
// Iterate through the Vernaux entries
const char *paux = p + vn->vn_aux;
for (unsigned j = 0; j < vn->vn_cnt; j++) {
if (paux + sizeof(Elf_Vernaux) > sec_end)
report_fatal_error("Section ended unexpected while scanning auxiliary "
"version needed records.");
const Elf_Vernaux *vna = reinterpret_cast<const Elf_Vernaux *>(paux);
size_t index = vna->vna_other & ELF::VERSYM_VERSION;
if (index >= VersionMap.size())
VersionMap.resize(index + 1);
VersionMap[index] = VersionMapEntry(vna);
paux += vna->vna_next;
}
p += vn->vn_next;
}
}
template <class ELFT>
void ELFFile<ELFT>::LoadVersionMap() const {
// If there is no dynamic symtab or version table, there is nothing to do.
if (!DotDynSymSec || !dot_gnu_version_sec)
return;
// Has the VersionMap already been loaded?
if (VersionMap.size() > 0)
return;
// The first two version indexes are reserved.
// Index 0 is LOCAL, index 1 is GLOBAL.
VersionMap.push_back(VersionMapEntry());
VersionMap.push_back(VersionMapEntry());
if (dot_gnu_version_d_sec)
LoadVersionDefs(dot_gnu_version_d_sec);
if (dot_gnu_version_r_sec)
LoadVersionNeeds(dot_gnu_version_r_sec);
}
template <class ELFT>
ELF::Elf64_Word
ELFFile<ELFT>::getExtendedSymbolTableIndex(const Elf_Sym *symb) const {
assert(symb->st_shndx == ELF::SHN_XINDEX);
return ExtendedSymbolTable.lookup(symb);
}
template <class ELFT>
ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
ELFFile<ELFT>::getSection(const Elf_Sym *symb) const {
uint32_t Index = symb->st_shndx;
if (Index == ELF::SHN_XINDEX)
return getSection(ExtendedSymbolTable.lookup(symb));
if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
return nullptr;
return getSection(symb->st_shndx);
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Sym *
ELFFile<ELFT>::getSymbol(uint32_t Index) const {
return &*(symbol_begin() + Index);
}
template <class ELFT>
ErrorOr<ArrayRef<uint8_t> >
ELFFile<ELFT>::getSectionContents(const Elf_Shdr *Sec) const {
if (Sec->sh_offset + Sec->sh_size > Buf.size())
return object_error::parse_failed;
const uint8_t *Start = base() + Sec->sh_offset;
return makeArrayRef(Start, Sec->sh_size);
}
template <class ELFT>
StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
return getELFRelocationTypeName(Header->e_machine, Type);
}
template <class ELFT>
void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
SmallVectorImpl<char> &Result) const {
if (!isMipsELF64()) {
StringRef Name = getRelocationTypeName(Type);
Result.append(Name.begin(), Name.end());
} else {
// The Mips N64 ABI allows up to three operations to be specified per
// relocation record. Unfortunately there's no easy way to test for the
// presence of N64 ELFs as they have no special flag that identifies them
// as being N64. We can safely assume at the moment that all Mips
// ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough
// information to disambiguate between old vs new ABIs.
uint8_t Type1 = (Type >> 0) & 0xFF;
uint8_t Type2 = (Type >> 8) & 0xFF;
uint8_t Type3 = (Type >> 16) & 0xFF;
// Concat all three relocation type names.
StringRef Name = getRelocationTypeName(Type1);
Result.append(Name.begin(), Name.end());
Name = getRelocationTypeName(Type2);
Result.append(1, '/');
Result.append(Name.begin(), Name.end());
Name = getRelocationTypeName(Type3);
Result.append(1, '/');
Result.append(Name.begin(), Name.end());
}
}
template <class ELFT>
template <class RelT>
std::pair<const typename ELFFile<ELFT>::Elf_Shdr *,
const typename ELFFile<ELFT>::Elf_Sym *>
ELFFile<ELFT>::getRelocationSymbol(const Elf_Shdr *Sec, const RelT *Rel) const {
if (!Sec->sh_link)
return std::make_pair(nullptr, nullptr);
ErrorOr<const Elf_Shdr *> SymTableOrErr = getSection(Sec->sh_link);
if (std::error_code EC = SymTableOrErr.getError())
report_fatal_error(EC.message());
const Elf_Shdr *SymTable = *SymTableOrErr;
return std::make_pair(
SymTable, getEntry<Elf_Sym>(SymTable, Rel->getSymbol(isMips64EL())));
}
template <class ELFT>
uint64_t ELFFile<ELFT>::getNumSections() const {
assert(Header && "Header not initialized!");
if (Header->e_shnum == ELF::SHN_UNDEF && Header->e_shoff > 0) {
assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
return SectionHeaderTable->sh_size;
}
return Header->e_shnum;
}
template <class ELFT>
typename ELFFile<ELFT>::uintX_t ELFFile<ELFT>::getStringTableIndex() const {
if (Header->e_shnum == ELF::SHN_UNDEF) {
if (Header->e_shstrndx == ELF::SHN_HIRESERVE)
return SectionHeaderTable->sh_link;
if (Header->e_shstrndx >= getNumSections())
return 0;
}
return Header->e_shstrndx;
}
template <class ELFT>
ELFFile<ELFT>::ELFFile(StringRef Object, std::error_code &EC)
: Buf(Object) {
const uint64_t FileSize = Buf.size();
if (sizeof(Elf_Ehdr) > FileSize) {
// File too short!
EC = object_error::parse_failed;
return;
}
Header = reinterpret_cast<const Elf_Ehdr *>(base());
if (Header->e_shoff == 0) {
scanDynamicTable();
return;
}
const uint64_t SectionTableOffset = Header->e_shoff;
if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize) {
// Section header table goes past end of file!
EC = object_error::parse_failed;
return;
}
// The getNumSections() call below depends on SectionHeaderTable being set.
SectionHeaderTable =
reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
const uint64_t SectionTableSize = getNumSections() * Header->e_shentsize;
if (SectionTableOffset + SectionTableSize > FileSize) {
// Section table goes past end of file!
EC = object_error::parse_failed;
return;
}
// Scan sections for special sections.
for (const Elf_Shdr &Sec : sections()) {
switch (Sec.sh_type) {
case ELF::SHT_HASH:
if (HashTable) {
EC = object_error::parse_failed;
return;
}
HashTable = reinterpret_cast<const Elf_Hash *>(base() + Sec.sh_offset);
break;
case ELF::SHT_SYMTAB_SHNDX:
if (SymbolTableSectionHeaderIndex) {
// More than one .symtab_shndx!
EC = object_error::parse_failed;
return;
}
SymbolTableSectionHeaderIndex = &Sec;
break;
case ELF::SHT_SYMTAB: {
if (dot_symtab_sec) {
// More than one .symtab!
EC = object_error::parse_failed;
return;
}
dot_symtab_sec = &Sec;
ErrorOr<const Elf_Shdr *> SectionOrErr = getSection(Sec.sh_link);
if ((EC = SectionOrErr.getError()))
return;
ErrorOr<StringRef> SymtabOrErr = getStringTable(*SectionOrErr);
if ((EC = SymtabOrErr.getError()))
return;
DotStrtab = *SymtabOrErr;
} break;
case ELF::SHT_DYNSYM: {
if (DotDynSymSec) {
// More than one .dynsym!
EC = object_error::parse_failed;
return;
}
DotDynSymSec = &Sec;
ErrorOr<const Elf_Shdr *> SectionOrErr = getSection(Sec.sh_link);
if ((EC = SectionOrErr.getError()))
return;
ErrorOr<StringRef> SymtabOrErr = getStringTable(*SectionOrErr);
if ((EC = SymtabOrErr.getError()))
return;
DynStrRegion.Addr = SymtabOrErr->data();
DynStrRegion.Size = SymtabOrErr->size();
DynStrRegion.EntSize = 1;
break;
}
case ELF::SHT_DYNAMIC:
if (DynamicRegion.Addr) {
// More than one .dynamic!
EC = object_error::parse_failed;
return;
}
DynamicRegion.Addr = base() + Sec.sh_offset;
DynamicRegion.Size = Sec.sh_size;
DynamicRegion.EntSize = Sec.sh_entsize;
break;
case ELF::SHT_GNU_versym:
if (dot_gnu_version_sec != nullptr) {
// More than one .gnu.version section!
EC = object_error::parse_failed;
return;
}
dot_gnu_version_sec = &Sec;
break;
case ELF::SHT_GNU_verdef:
if (dot_gnu_version_d_sec != nullptr) {
// More than one .gnu.version_d section!
EC = object_error::parse_failed;
return;
}
dot_gnu_version_d_sec = &Sec;
break;
case ELF::SHT_GNU_verneed:
if (dot_gnu_version_r_sec != nullptr) {
// More than one .gnu.version_r section!
EC = object_error::parse_failed;
return;
}
dot_gnu_version_r_sec = &Sec;
break;
}
}
// Get string table sections.
ErrorOr<const Elf_Shdr *> StrTabSecOrErr = getSection(getStringTableIndex());
if ((EC = StrTabSecOrErr.getError()))
return;
ErrorOr<StringRef> SymtabOrErr = getStringTable(*StrTabSecOrErr);
if ((EC = SymtabOrErr.getError()))
return;
DotShstrtab = *SymtabOrErr;
// Build symbol name side-mapping if there is one.
if (SymbolTableSectionHeaderIndex) {
const Elf_Word *ShndxTable = reinterpret_cast<const Elf_Word*>(base() +
SymbolTableSectionHeaderIndex->sh_offset);
for (const Elf_Sym &S : symbols()) {
if (*ShndxTable != ELF::SHN_UNDEF)
ExtendedSymbolTable[&S] = *ShndxTable;
++ShndxTable;
}
}
scanDynamicTable();
EC = std::error_code();
}
template <class ELFT>
void ELFFile<ELFT>::scanDynamicTable() {
// Build load-address to file-offset map.
typedef IntervalMap<
uintX_t, uintptr_t,
IntervalMapImpl::NodeSizer<uintX_t, uintptr_t>::LeafSize,
IntervalMapHalfOpenInfo<uintX_t>> LoadMapT;
typename LoadMapT::Allocator Alloc;
// Allocate the IntervalMap on the heap to work around MSVC bug where the
// stack doesn't get realigned despite LoadMap having alignment 8 (PR24113).
std::unique_ptr<LoadMapT> LoadMap(new LoadMapT(Alloc));
for (Elf_Phdr_Iter PhdrI = program_header_begin(),
PhdrE = program_header_end();
PhdrI != PhdrE; ++PhdrI) {
if (PhdrI->p_type == ELF::PT_DYNAMIC) {
DynamicRegion.Addr = base() + PhdrI->p_offset;
DynamicRegion.Size = PhdrI->p_filesz;
DynamicRegion.EntSize = sizeof(Elf_Dyn);
continue;
}
if (PhdrI->p_type != ELF::PT_LOAD)
continue;
if (PhdrI->p_filesz == 0)
continue;
LoadMap->insert(PhdrI->p_vaddr, PhdrI->p_vaddr + PhdrI->p_filesz,
PhdrI->p_offset);
}
auto toMappedAddr = [&](uint64_t VAddr) -> const uint8_t * {
auto I = LoadMap->find(VAddr);
if (I == LoadMap->end())
return nullptr;
return this->base() + I.value() + (VAddr - I.start());
};
for (Elf_Dyn_Iter DynI = dynamic_table_begin(), DynE = dynamic_table_end();
DynI != DynE; ++DynI) {
switch (DynI->d_tag) {
case ELF::DT_HASH:
if (HashTable)
continue;
HashTable =
reinterpret_cast<const Elf_Hash *>(toMappedAddr(DynI->getPtr()));
break;
case ELF::DT_STRTAB:
if (!DynStrRegion.Addr)
DynStrRegion.Addr = toMappedAddr(DynI->getPtr());
break;
case ELF::DT_STRSZ:
if (!DynStrRegion.Size)
DynStrRegion.Size = DynI->getVal();
break;
case ELF::DT_RELA:
if (!DynRelaRegion.Addr)
DynRelaRegion.Addr = toMappedAddr(DynI->getPtr());
break;
case ELF::DT_RELASZ:
DynRelaRegion.Size = DynI->getVal();
break;
case ELF::DT_RELAENT:
DynRelaRegion.EntSize = DynI->getVal();
}
}
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_begin() const {
if (Header->e_shentsize != sizeof(Elf_Shdr))
report_fatal_error(
"Invalid section header entry size (e_shentsize) in ELF header");
return reinterpret_cast<const Elf_Shdr *>(base() + Header->e_shoff);
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_end() const {
return section_begin() + getNumSections();
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Sym *ELFFile<ELFT>::symbol_begin() const {
if (!dot_symtab_sec)
return nullptr;
if (dot_symtab_sec->sh_entsize != sizeof(Elf_Sym))
report_fatal_error("Invalid symbol size");
return reinterpret_cast<const Elf_Sym *>(base() + dot_symtab_sec->sh_offset);
}
template <class ELFT>
const typename ELFFile<ELFT>::Elf_Sym *ELFFile<ELFT>::symbol_end() const {
if (!dot_symtab_sec)
return nullptr;
return reinterpret_cast<const Elf_Sym *>(base() + dot_symtab_sec->sh_offset +
dot_symtab_sec->sh_size);
}
template <class ELFT>
typename ELFFile<ELFT>::Elf_Dyn_Iter
ELFFile<ELFT>::dynamic_table_begin() const {
if (DynamicRegion.Addr)
return Elf_Dyn_Iter(DynamicRegion.EntSize,
(const char *)DynamicRegion.Addr);
return Elf_Dyn_Iter(0, nullptr);
}
template <class ELFT>
typename ELFFile<ELFT>::Elf_Dyn_Iter
ELFFile<ELFT>::dynamic_table_end(bool NULLEnd) const {
if (!DynamicRegion.Addr)
return Elf_Dyn_Iter(0, nullptr);
Elf_Dyn_Iter Ret(DynamicRegion.EntSize,
(const char *)DynamicRegion.Addr + DynamicRegion.Size);
if (NULLEnd) {
Elf_Dyn_Iter Start = dynamic_table_begin();
while (Start != Ret && Start->getTag() != ELF::DT_NULL)
++Start;
// Include the DT_NULL.
if (Start != Ret)
++Start;
Ret = Start;
}
return Ret;
}
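// Editor's note: a usage sketch (editor's assumption, not upstream API). With
// NULLEnd = true the range stops one past the terminating DT_NULL entry rather
// than at the end of the PT_DYNAMIC region.
template <class ELFT>
unsigned exampleCountDynamicEntries(const ELFFile<ELFT> &F) {
unsigned Count = 0;
for (const auto &Entry : F.dynamic_table(/*NULLEnd=*/true)) {
(void)Entry; // Counts entries up to and including DT_NULL.
++Count;
}
return Count;
}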
template <class ELFT>
StringRef ELFFile<ELFT>::getLoadName() const {
if (!dt_soname) {
dt_soname = "";
// Find the DT_SONAME entry
for (const auto &Entry : dynamic_table())
if (Entry.getTag() == ELF::DT_SONAME) {
dt_soname = getDynamicString(Entry.getVal());
break;
}
}
return dt_soname;
}
template <class ELFT>
template <typename T>
const T *ELFFile<ELFT>::getEntry(uint32_t Section, uint32_t Entry) const {
ErrorOr<const Elf_Shdr *> Sec = getSection(Section);
if (std::error_code EC = Sec.getError())
report_fatal_error(EC.message());
return getEntry<T>(*Sec, Entry);
}
template <class ELFT>
template <typename T>
const T *ELFFile<ELFT>::getEntry(const Elf_Shdr *Section,
uint32_t Entry) const {
return reinterpret_cast<const T *>(base() + Section->sh_offset +
(Entry * Section->sh_entsize));
}
template <class ELFT>
ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
ELFFile<ELFT>::getSection(uint32_t Index) const {
assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
if (Index >= getNumSections())
return object_error::invalid_section_index;
return reinterpret_cast<const Elf_Shdr *>(
reinterpret_cast<const char *>(SectionHeaderTable) +
(Index * Header->e_shentsize));
}
template <class ELFT>
ErrorOr<StringRef>
ELFFile<ELFT>::getStringTable(const Elf_Shdr *Section) const {
if (Section->sh_type != ELF::SHT_STRTAB)
return object_error::parse_failed;
uint64_t Offset = Section->sh_offset;
uint64_t Size = Section->sh_size;
if (Offset + Size > Buf.size())
return object_error::parse_failed;
StringRef Data((const char *)base() + Section->sh_offset, Size);
if (Size == 0 || Data[Size - 1] != '\0')
return object_error::string_table_non_null_end;
return Data;
}
template <class ELFT>
const char *ELFFile<ELFT>::getDynamicString(uintX_t Offset) const {
if (Offset >= DynStrRegion.Size)
return nullptr;
return (const char *)DynStrRegion.Addr + Offset;
}
template <class ELFT>
ErrorOr<StringRef>
ELFFile<ELFT>::getStaticSymbolName(const Elf_Sym *Symb) const {
return Symb->getName(DotStrtab);
}
template <class ELFT>
ErrorOr<StringRef>
ELFFile<ELFT>::getDynamicSymbolName(const Elf_Sym *Symb) const {
return StringRef(getDynamicString(Symb->st_name));
}
template <class ELFT>
ErrorOr<StringRef> ELFFile<ELFT>::getSymbolName(const Elf_Sym *Symb,
bool IsDynamic) const {
if (IsDynamic)
return getDynamicSymbolName(Symb);
return getStaticSymbolName(Symb);
}
template <class ELFT>
ErrorOr<StringRef>
ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section) const {
uint32_t Offset = Section->sh_name;
if (Offset >= DotShstrtab.size())
return object_error::parse_failed;
return StringRef(DotShstrtab.data() + Offset);
}
template <class ELFT>
ErrorOr<StringRef> ELFFile<ELFT>::getSymbolVersion(const Elf_Shdr *section,
const Elf_Sym *symb,
bool &IsDefault) const {
StringRef StrTab;
if (section) {
ErrorOr<StringRef> StrTabOrErr = getStringTable(section);
if (std::error_code EC = StrTabOrErr.getError())
return EC;
StrTab = *StrTabOrErr;
}
// Handle non-dynamic symbols.
if (section != DotDynSymSec && section != nullptr) {
// Non-dynamic symbols can have versions in their names
// A name of the form 'foo@V1' indicates version 'V1', non-default.
// A name of the form 'foo@@V2' indicates version 'V2', default version.
ErrorOr<StringRef> SymName = symb->getName(StrTab);
if (!SymName)
return SymName;
StringRef Name = *SymName;
size_t atpos = Name.find('@');
if (atpos == StringRef::npos) {
IsDefault = false;
return StringRef("");
}
++atpos;
if (atpos < Name.size() && Name[atpos] == '@') {
IsDefault = true;
++atpos;
} else {
IsDefault = false;
}
return Name.substr(atpos);
}
// This is a dynamic symbol. Look in the GNU symbol version table.
if (!dot_gnu_version_sec) {
// No version table.
IsDefault = false;
return StringRef("");
}
// Determine the position in the symbol table of this entry.
size_t entry_index =
(reinterpret_cast<uintptr_t>(symb) - DotDynSymSec->sh_offset -
reinterpret_cast<uintptr_t>(base())) /
sizeof(Elf_Sym);
// Get the corresponding version index entry
const Elf_Versym *vs = getEntry<Elf_Versym>(dot_gnu_version_sec, entry_index);
size_t version_index = vs->vs_index & ELF::VERSYM_VERSION;
// Special markers for unversioned symbols.
if (version_index == ELF::VER_NDX_LOCAL ||
version_index == ELF::VER_NDX_GLOBAL) {
IsDefault = false;
return StringRef("");
}
// Lookup this symbol in the version table
LoadVersionMap();
if (version_index >= VersionMap.size() || VersionMap[version_index].isNull())
return object_error::parse_failed;
const VersionMapEntry &entry = VersionMap[version_index];
// Get the version name string
size_t name_offset;
if (entry.isVerdef()) {
// The first Verdaux entry holds the name.
name_offset = entry.getVerdef()->getAux()->vda_name;
} else {
name_offset = entry.getVernaux()->vna_name;
}
// Set IsDefault
if (entry.isVerdef()) {
IsDefault = !(vs->vs_index & ELF::VERSYM_HIDDEN);
} else {
IsDefault = false;
}
if (name_offset >= DynStrRegion.Size)
return object_error::parse_failed;
return StringRef(getDynamicString(name_offset));
}
/// This function returns the hash value for a symbol in the .dynsym section.
/// The name of the API remains consistent with the one specified in libelf.
/// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
static inline unsigned elf_hash(StringRef &symbolName) {
unsigned h = 0, g;
for (unsigned i = 0, j = symbolName.size(); i < j; i++) {
h = (h << 4) + symbolName[i];
g = h & 0xf0000000L;
if (g != 0)
h ^= g >> 24;
h &= ~g;
}
return h;
}
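// Editor's note: a worked value for elf_hash above (hand-computed, for
// illustration): for "printf" no step ever sets the top nibble, so the g-fold
// never fires and elf_hash("printf") == 0x077905a6.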
} // end namespace object
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/StackMapParser.h | //===-------- StackMapParser.h - StackMap Parsing Support -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_STACKMAPPARSER_H
#define LLVM_CODEGEN_STACKMAPPARSER_H
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include <map>
#include <vector>
namespace llvm {
template <support::endianness Endianness>
class StackMapV1Parser {
public:
template <typename AccessorT>
class AccessorIterator {
public:
AccessorIterator(AccessorT A) : A(A) {}
AccessorIterator& operator++() { A = A.next(); return *this; }
AccessorIterator operator++(int) {
auto tmp = *this;
++*this;
return tmp;
}
bool operator==(const AccessorIterator &Other) {
return A.P == Other.A.P;
}
bool operator!=(const AccessorIterator &Other) { return !(*this == Other); }
AccessorT& operator*() { return A; }
AccessorT* operator->() { return &A; }
private:
AccessorT A;
};
/// Accessor for function records.
class FunctionAccessor {
friend class StackMapV1Parser;
public:
/// Get the function address.
uint64_t getFunctionAddress() const {
return read<uint64_t>(P);
}
    /// Get the function's stack size. The stack map format stores this as a
    /// 64-bit field, so return the full width rather than silently truncating.
    uint64_t getStackSize() const {
      return read<uint64_t>(P + sizeof(uint64_t));
    }
private:
FunctionAccessor(const uint8_t *P) : P(P) {}
    static const int FunctionAccessorSize = 2 * sizeof(uint64_t);
FunctionAccessor next() const {
return FunctionAccessor(P + FunctionAccessorSize);
}
const uint8_t *P;
};
/// Accessor for constants.
class ConstantAccessor {
friend class StackMapV1Parser;
public:
/// Return the value of this constant.
uint64_t getValue() const { return read<uint64_t>(P); }
private:
ConstantAccessor(const uint8_t *P) : P(P) {}
    static const int ConstantAccessorSize = sizeof(uint64_t);
ConstantAccessor next() const {
return ConstantAccessor(P + ConstantAccessorSize);
}
const uint8_t *P;
};
// Forward-declare RecordAccessor so we can friend it below.
class RecordAccessor;
enum class LocationKind : uint8_t {
Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5
};
/// Accessor for location records.
class LocationAccessor {
friend class StackMapV1Parser;
friend class RecordAccessor;
public:
/// Get the Kind for this location.
LocationKind getKind() const {
return LocationKind(P[KindOffset]);
}
/// Get the Dwarf register number for this location.
uint16_t getDwarfRegNum() const {
return read<uint16_t>(P + DwarfRegNumOffset);
}
/// Get the small-constant for this location. (Kind must be Constant).
uint32_t getSmallConstant() const {
assert(getKind() == LocationKind::Constant && "Not a small constant.");
return read<uint32_t>(P + SmallConstantOffset);
}
/// Get the constant-index for this location. (Kind must be ConstantIndex).
uint32_t getConstantIndex() const {
assert(getKind() == LocationKind::ConstantIndex &&
"Not a constant-index.");
return read<uint32_t>(P + SmallConstantOffset);
}
/// Get the offset for this location. (Kind must be Direct or Indirect).
int32_t getOffset() const {
assert((getKind() == LocationKind::Direct ||
getKind() == LocationKind::Indirect) &&
"Not direct or indirect.");
return read<int32_t>(P + SmallConstantOffset);
}
private:
LocationAccessor(const uint8_t *P) : P(P) {}
LocationAccessor next() const {
return LocationAccessor(P + LocationAccessorSize);
}
static const int KindOffset = 0;
static const int DwarfRegNumOffset = KindOffset + sizeof(uint16_t);
static const int SmallConstantOffset = DwarfRegNumOffset + sizeof(uint16_t);
static const int LocationAccessorSize = sizeof(uint64_t);
const uint8_t *P;
};
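  // A minimal decoding sketch (hypothetical; `Loc` is a LocationAccessor
  // obtained from a record; the use* helpers and `Parser` are assumed to
  // exist in the caller):
  //
  //   switch (Loc.getKind()) {
  //   case LocationKind::Register:
  //     useRegister(Loc.getDwarfRegNum());                 // value in register
  //     break;
  //   case LocationKind::Direct:
  //   case LocationKind::Indirect:
  //     useRegOffset(Loc.getDwarfRegNum(), Loc.getOffset()); // reg + offset
  //     break;
  //   case LocationKind::Constant:
  //     useConstant(Loc.getSmallConstant());               // inline constant
  //     break;
  //   case LocationKind::ConstantIndex:
  //     useConstant(Parser.getConstant(Loc.getConstantIndex()).getValue());
  //     break;
  //   }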
/// Accessor for stackmap live-out fields.
class LiveOutAccessor {
friend class StackMapV1Parser;
friend class RecordAccessor;
public:
/// Get the Dwarf register number for this live-out.
uint16_t getDwarfRegNum() const {
return read<uint16_t>(P + DwarfRegNumOffset);
}
    /// Get the size in bytes of the live [sub]register.
unsigned getSizeInBytes() const {
return read<uint8_t>(P + SizeOffset);
}
private:
LiveOutAccessor(const uint8_t *P) : P(P) {}
LiveOutAccessor next() const {
return LiveOutAccessor(P + LiveOutAccessorSize);
}
static const int DwarfRegNumOffset = 0;
static const int SizeOffset =
DwarfRegNumOffset + sizeof(uint16_t) + sizeof(uint8_t);
static const int LiveOutAccessorSize = sizeof(uint32_t);
const uint8_t *P;
};
/// Accessor for stackmap records.
class RecordAccessor {
friend class StackMapV1Parser;
public:
typedef AccessorIterator<LocationAccessor> location_iterator;
typedef AccessorIterator<LiveOutAccessor> liveout_iterator;
/// Get the patchpoint/stackmap ID for this record.
uint64_t getID() const {
return read<uint64_t>(P + PatchpointIDOffset);
}
/// Get the instruction offset (from the start of the containing function)
/// for this record.
uint32_t getInstructionOffset() const {
return read<uint32_t>(P + InstructionOffsetOffset);
}
/// Get the number of locations contained in this record.
uint16_t getNumLocations() const {
return read<uint16_t>(P + NumLocationsOffset);
}
/// Get the location with the given index.
LocationAccessor getLocation(unsigned LocationIndex) const {
unsigned LocationOffset =
LocationListOffset + LocationIndex * LocationSize;
return LocationAccessor(P + LocationOffset);
}
/// Begin iterator for locations.
location_iterator location_begin() const {
return location_iterator(getLocation(0));
}
/// End iterator for locations.
location_iterator location_end() const {
return location_iterator(getLocation(getNumLocations()));
}
/// Iterator range for locations.
iterator_range<location_iterator> locations() const {
return make_range(location_begin(), location_end());
}
/// Get the number of liveouts contained in this record.
uint16_t getNumLiveOuts() const {
return read<uint16_t>(P + getNumLiveOutsOffset());
}
/// Get the live-out with the given index.
LiveOutAccessor getLiveOut(unsigned LiveOutIndex) const {
unsigned LiveOutOffset =
getNumLiveOutsOffset() + sizeof(uint16_t) + LiveOutIndex * LiveOutSize;
return LiveOutAccessor(P + LiveOutOffset);
}
/// Begin iterator for live-outs.
liveout_iterator liveouts_begin() const {
return liveout_iterator(getLiveOut(0));
}
/// End iterator for live-outs.
liveout_iterator liveouts_end() const {
return liveout_iterator(getLiveOut(getNumLiveOuts()));
}
/// Iterator range for live-outs.
iterator_range<liveout_iterator> liveouts() const {
return make_range(liveouts_begin(), liveouts_end());
}
private:
RecordAccessor(const uint8_t *P) : P(P) {}
unsigned getNumLiveOutsOffset() const {
return LocationListOffset + LocationSize * getNumLocations() +
sizeof(uint16_t);
}
unsigned getSizeInBytes() const {
unsigned RecordSize =
getNumLiveOutsOffset() + sizeof(uint16_t) + getNumLiveOuts() * LiveOutSize;
return (RecordSize + 7) & ~0x7;
}
RecordAccessor next() const {
return RecordAccessor(P + getSizeInBytes());
}
static const unsigned PatchpointIDOffset = 0;
static const unsigned InstructionOffsetOffset =
PatchpointIDOffset + sizeof(uint64_t);
static const unsigned NumLocationsOffset =
InstructionOffsetOffset + sizeof(uint32_t) + sizeof(uint16_t);
static const unsigned LocationListOffset =
NumLocationsOffset + sizeof(uint16_t);
static const unsigned LocationSize = sizeof(uint64_t);
static const unsigned LiveOutSize = sizeof(uint32_t);
const uint8_t *P;
};
/// Construct a parser for a version-1 stackmap. StackMap data will be read
/// from the given array.
StackMapV1Parser(ArrayRef<uint8_t> StackMapSection)
: StackMapSection(StackMapSection) {
ConstantsListOffset = FunctionListOffset + getNumFunctions() * FunctionSize;
assert(StackMapSection[0] == 1 &&
"StackMapV1Parser can only parse version 1 stackmaps");
unsigned CurrentRecordOffset =
ConstantsListOffset + getNumConstants() * ConstantSize;
for (unsigned I = 0, E = getNumRecords(); I != E; ++I) {
StackMapRecordOffsets.push_back(CurrentRecordOffset);
CurrentRecordOffset +=
RecordAccessor(&StackMapSection[CurrentRecordOffset]).getSizeInBytes();
}
}
typedef AccessorIterator<FunctionAccessor> function_iterator;
typedef AccessorIterator<ConstantAccessor> constant_iterator;
typedef AccessorIterator<RecordAccessor> record_iterator;
/// Get the version number of this stackmap. (Always returns 1).
unsigned getVersion() const { return 1; }
/// Get the number of functions in the stack map.
uint32_t getNumFunctions() const {
return read<uint32_t>(&StackMapSection[NumFunctionsOffset]);
}
/// Get the number of large constants in the stack map.
uint32_t getNumConstants() const {
return read<uint32_t>(&StackMapSection[NumConstantsOffset]);
}
/// Get the number of stackmap records in the stackmap.
uint32_t getNumRecords() const {
return read<uint32_t>(&StackMapSection[NumRecordsOffset]);
}
  /// Return a FunctionAccessor for the given function index.
FunctionAccessor getFunction(unsigned FunctionIndex) const {
return FunctionAccessor(StackMapSection.data() +
getFunctionOffset(FunctionIndex));
}
/// Begin iterator for functions.
function_iterator functions_begin() const {
return function_iterator(getFunction(0));
}
/// End iterator for functions.
function_iterator functions_end() const {
return function_iterator(
FunctionAccessor(StackMapSection.data() +
getFunctionOffset(getNumFunctions())));
}
/// Iterator range for functions.
iterator_range<function_iterator> functions() const {
return make_range(functions_begin(), functions_end());
}
/// Return the large constant at the given index.
ConstantAccessor getConstant(unsigned ConstantIndex) const {
return ConstantAccessor(StackMapSection.data() +
getConstantOffset(ConstantIndex));
}
/// Begin iterator for constants.
constant_iterator constants_begin() const {
return constant_iterator(getConstant(0));
}
/// End iterator for constants.
constant_iterator constants_end() const {
return constant_iterator(
ConstantAccessor(StackMapSection.data() +
getConstantOffset(getNumConstants())));
}
/// Iterator range for constants.
iterator_range<constant_iterator> constants() const {
return make_range(constants_begin(), constants_end());
}
/// Return a RecordAccessor for the given record index.
RecordAccessor getRecord(unsigned RecordIndex) const {
std::size_t RecordOffset = StackMapRecordOffsets[RecordIndex];
return RecordAccessor(StackMapSection.data() + RecordOffset);
}
/// Begin iterator for records.
record_iterator records_begin() const {
if (getNumRecords() == 0)
return record_iterator(RecordAccessor(nullptr));
return record_iterator(getRecord(0));
}
/// End iterator for records.
record_iterator records_end() const {
    // Records need to be handled specially, since we cache the start addresses
    // for them: we can't just compute the one-past-the-end address; we have to
    // look at the last record and use the 'next' method.
if (getNumRecords() == 0)
return record_iterator(RecordAccessor(nullptr));
return record_iterator(getRecord(getNumRecords() - 1).next());
}
/// Iterator range for records.
iterator_range<record_iterator> records() const {
return make_range(records_begin(), records_end());
}
private:
template <typename T>
static T read(const uint8_t *P) {
return support::endian::read<T, Endianness, 1>(P);
}
static const unsigned HeaderOffset = 0;
static const unsigned NumFunctionsOffset = HeaderOffset + sizeof(uint32_t);
static const unsigned NumConstantsOffset = NumFunctionsOffset + sizeof(uint32_t);
static const unsigned NumRecordsOffset = NumConstantsOffset + sizeof(uint32_t);
static const unsigned FunctionListOffset = NumRecordsOffset + sizeof(uint32_t);
static const unsigned FunctionSize = 2 * sizeof(uint64_t);
static const unsigned ConstantSize = sizeof(uint64_t);
std::size_t getFunctionOffset(unsigned FunctionIndex) const {
return FunctionListOffset + FunctionIndex * FunctionSize;
}
std::size_t getConstantOffset(unsigned ConstantIndex) const {
return ConstantsListOffset + ConstantIndex * ConstantSize;
}
ArrayRef<uint8_t> StackMapSection;
unsigned ConstantsListOffset;
std::vector<unsigned> StackMapRecordOffsets;
};
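// A minimal end-to-end sketch (hypothetical; `Data` holds the raw bytes of a
// __llvm_stackmaps section from a little-endian object file):
//
//   StackMapV1Parser<support::little> Parser(Data);
//   for (const auto &R : Parser.records())
//     outs() << "record " << R.getID() << ": " << R.getNumLocations()
//            << " locations, " << R.getNumLiveOuts() << " live-outs\n";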
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/SymbolicFile.h | //===- SymbolicFile.h - Interface that only provides symbols ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SymbolicFile interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_SYMBOLICFILE_H
#define LLVM_OBJECT_SYMBOLICFILE_H
#include "llvm/Object/Binary.h"
namespace llvm {
namespace object {
union DataRefImpl {
// This entire union should probably be a
// char[max(8, sizeof(uintptr_t))] and require the impl to cast.
struct {
uint32_t a, b;
} d;
uintptr_t p;
DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); }
};
inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}
inline bool operator!=(const DataRefImpl &a, const DataRefImpl &b) {
return !operator==(a, b);
}
inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
}
template <class content_type>
class content_iterator {
content_type Current;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = content_type;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
content_iterator(content_type symb) : Current(symb) {}
const content_type *operator->() const { return &Current; }
const content_type &operator*() const { return Current; }
bool operator==(const content_iterator &other) const {
return Current == other.Current;
}
bool operator!=(const content_iterator &other) const {
return !(*this == other);
}
content_iterator &operator++() { // preincrement
Current.moveNext();
return *this;
}
};
class SymbolicFile;
/// This is a value type class that represents a single symbol in the list of
/// symbols in the object file.
class BasicSymbolRef {
DataRefImpl SymbolPimpl;
const SymbolicFile *OwningObject;
public:
// FIXME: should we add a SF_Text?
enum Flags : unsigned {
SF_None = 0,
SF_Undefined = 1U << 0, // Symbol is defined in another object file
SF_Global = 1U << 1, // Global symbol
SF_Weak = 1U << 2, // Weak symbol
SF_Absolute = 1U << 3, // Absolute symbol
SF_Common = 1U << 4, // Symbol has common linkage
SF_Indirect = 1U << 5, // Symbol is an alias to another symbol
SF_Exported = 1U << 6, // Symbol is visible to other DSOs
SF_FormatSpecific = 1U << 7, // Specific to the object file format
// (e.g. section symbols)
SF_Thumb = 1U << 8, // Thumb symbol in a 32-bit ARM binary
SF_Hidden = 1U << 9, // Symbol has hidden visibility
};
BasicSymbolRef() : OwningObject(nullptr) { }
BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);
bool operator==(const BasicSymbolRef &Other) const;
bool operator<(const BasicSymbolRef &Other) const;
void moveNext();
std::error_code printName(raw_ostream &OS) const;
/// Get symbol flags (bitwise OR of SymbolRef::Flags)
uint32_t getFlags() const;
DataRefImpl getRawDataRefImpl() const;
const SymbolicFile *getObject() const;
};
typedef content_iterator<BasicSymbolRef> basic_symbol_iterator;
class SymbolicFile : public Binary {
public:
~SymbolicFile() override;
SymbolicFile(unsigned int Type, MemoryBufferRef Source);
// virtual interface.
virtual void moveSymbolNext(DataRefImpl &Symb) const = 0;
virtual std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const = 0;
virtual uint32_t getSymbolFlags(DataRefImpl Symb) const = 0;
virtual basic_symbol_iterator symbol_begin_impl() const = 0;
virtual basic_symbol_iterator symbol_end_impl() const = 0;
// convenience wrappers.
basic_symbol_iterator symbol_begin() const {
return symbol_begin_impl();
}
basic_symbol_iterator symbol_end() const {
return symbol_end_impl();
}
typedef iterator_range<basic_symbol_iterator> basic_symbol_iterator_range;
basic_symbol_iterator_range symbols() const {
return basic_symbol_iterator_range(symbol_begin(), symbol_end());
}
// construction aux.
static ErrorOr<std::unique_ptr<SymbolicFile>>
createSymbolicFile(MemoryBufferRef Object, sys::fs::file_magic Type,
LLVMContext *Context);
static ErrorOr<std::unique_ptr<SymbolicFile>>
createSymbolicFile(MemoryBufferRef Object) {
return createSymbolicFile(Object, sys::fs::file_magic::unknown, nullptr);
}
static ErrorOr<OwningBinary<SymbolicFile>>
createSymbolicFile(StringRef ObjectPath);
static inline bool classof(const Binary *v) {
return v->isSymbolic();
}
};
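// A minimal usage sketch (hypothetical; "foo.o" is any object file on disk):
//
//   ErrorOr<OwningBinary<SymbolicFile>> FileOrErr =
//       SymbolicFile::createSymbolicFile("foo.o");
//   if (FileOrErr) {
//     for (const BasicSymbolRef &Sym : FileOrErr->getBinary()->symbols()) {
//       Sym.printName(outs());
//       outs() << "\n";
//     }
//   }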
inline BasicSymbolRef::BasicSymbolRef(DataRefImpl SymbolP,
const SymbolicFile *Owner)
: SymbolPimpl(SymbolP), OwningObject(Owner) {}
inline bool BasicSymbolRef::operator==(const BasicSymbolRef &Other) const {
return SymbolPimpl == Other.SymbolPimpl;
}
inline bool BasicSymbolRef::operator<(const BasicSymbolRef &Other) const {
return SymbolPimpl < Other.SymbolPimpl;
}
inline void BasicSymbolRef::moveNext() {
return OwningObject->moveSymbolNext(SymbolPimpl);
}
inline std::error_code BasicSymbolRef::printName(raw_ostream &OS) const {
return OwningObject->printSymbolName(OS, SymbolPimpl);
}
inline uint32_t BasicSymbolRef::getFlags() const {
return OwningObject->getSymbolFlags(SymbolPimpl);
}
inline DataRefImpl BasicSymbolRef::getRawDataRefImpl() const {
return SymbolPimpl;
}
inline const SymbolicFile *BasicSymbolRef::getObject() const {
return OwningObject;
}
}
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/ArchiveWriter.h | //===- ArchiveWriter.h - ar archive file format writer ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Declares the writeArchive function for writing an archive file.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ARCHIVEWRITER_H
#define LLVM_OBJECT_ARCHIVEWRITER_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Archive.h"
#include "llvm/Support/FileSystem.h"
namespace llvm {
class NewArchiveIterator {
bool IsNewMember;
StringRef Name;
object::Archive::child_iterator OldI;
StringRef NewFilename;
public:
NewArchiveIterator(object::Archive::child_iterator I, StringRef Name);
NewArchiveIterator(StringRef I, StringRef Name);
bool isNewMember() const;
StringRef getName() const;
object::Archive::child_iterator getOld() const;
StringRef getNew() const;
llvm::ErrorOr<int> getFD(sys::fs::file_status &NewStatus) const;
const sys::fs::file_status &getStatus() const;
};
std::pair<StringRef, std::error_code>
writeArchive(StringRef ArcName, std::vector<NewArchiveIterator> &NewMembers,
bool WriteSymtab, object::Archive::Kind Kind, bool Deterministic);
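// A minimal usage sketch (hypothetical file names): build a deterministic GNU
// archive with a symbol table from two object files already on disk.
//
//   std::vector<NewArchiveIterator> Members;
//   Members.emplace_back("a.o", "a.o"); // (file path, member name)
//   Members.emplace_back("b.o", "b.o");
//   std::pair<StringRef, std::error_code> Result =
//       writeArchive("libfoo.a", Members, /*WriteSymtab=*/true,
//                    object::Archive::K_GNU, /*Deterministic=*/true);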
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/COFF.h | //===- COFF.h - COFF object file implementation -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the COFFObjectFile class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_COFF_H
#define LLVM_OBJECT_COFF_H
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorOr.h"
namespace llvm {
template <typename T> class ArrayRef;
namespace object {
class ImportDirectoryEntryRef;
class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
class ImportedSymbolRef;
class BaseRelocRef;
typedef content_iterator<ImportDirectoryEntryRef> import_directory_iterator;
typedef content_iterator<DelayImportDirectoryEntryRef>
delay_import_directory_iterator;
typedef content_iterator<ExportDirectoryEntryRef> export_directory_iterator;
typedef content_iterator<ImportedSymbolRef> imported_symbol_iterator;
typedef content_iterator<BaseRelocRef> base_reloc_iterator;
/// The DOS compatible header at the front of all PE/COFF executables.
struct dos_header {
char Magic[2];
support::ulittle16_t UsedBytesInTheLastPage;
support::ulittle16_t FileSizeInPages;
support::ulittle16_t NumberOfRelocationItems;
support::ulittle16_t HeaderSizeInParagraphs;
support::ulittle16_t MinimumExtraParagraphs;
support::ulittle16_t MaximumExtraParagraphs;
support::ulittle16_t InitialRelativeSS;
support::ulittle16_t InitialSP;
support::ulittle16_t Checksum;
support::ulittle16_t InitialIP;
support::ulittle16_t InitialRelativeCS;
support::ulittle16_t AddressOfRelocationTable;
support::ulittle16_t OverlayNumber;
support::ulittle16_t Reserved[4];
support::ulittle16_t OEMid;
support::ulittle16_t OEMinfo;
support::ulittle16_t Reserved2[10];
support::ulittle32_t AddressOfNewExeHeader;
};
struct coff_file_header {
support::ulittle16_t Machine;
support::ulittle16_t NumberOfSections;
support::ulittle32_t TimeDateStamp;
support::ulittle32_t PointerToSymbolTable;
support::ulittle32_t NumberOfSymbols;
support::ulittle16_t SizeOfOptionalHeader;
support::ulittle16_t Characteristics;
bool isImportLibrary() const { return NumberOfSections == 0xffff; }
};
struct coff_bigobj_file_header {
support::ulittle16_t Sig1;
support::ulittle16_t Sig2;
support::ulittle16_t Version;
support::ulittle16_t Machine;
support::ulittle32_t TimeDateStamp;
uint8_t UUID[16];
support::ulittle32_t unused1;
support::ulittle32_t unused2;
support::ulittle32_t unused3;
support::ulittle32_t unused4;
support::ulittle32_t NumberOfSections;
support::ulittle32_t PointerToSymbolTable;
support::ulittle32_t NumberOfSymbols;
};
/// The 32-bit PE header that follows the COFF header.
struct pe32_header {
support::ulittle16_t Magic;
uint8_t MajorLinkerVersion;
uint8_t MinorLinkerVersion;
support::ulittle32_t SizeOfCode;
support::ulittle32_t SizeOfInitializedData;
support::ulittle32_t SizeOfUninitializedData;
support::ulittle32_t AddressOfEntryPoint;
support::ulittle32_t BaseOfCode;
support::ulittle32_t BaseOfData;
support::ulittle32_t ImageBase;
support::ulittle32_t SectionAlignment;
support::ulittle32_t FileAlignment;
support::ulittle16_t MajorOperatingSystemVersion;
support::ulittle16_t MinorOperatingSystemVersion;
support::ulittle16_t MajorImageVersion;
support::ulittle16_t MinorImageVersion;
support::ulittle16_t MajorSubsystemVersion;
support::ulittle16_t MinorSubsystemVersion;
support::ulittle32_t Win32VersionValue;
support::ulittle32_t SizeOfImage;
support::ulittle32_t SizeOfHeaders;
support::ulittle32_t CheckSum;
support::ulittle16_t Subsystem;
// FIXME: This should be DllCharacteristics.
support::ulittle16_t DLLCharacteristics;
support::ulittle32_t SizeOfStackReserve;
support::ulittle32_t SizeOfStackCommit;
support::ulittle32_t SizeOfHeapReserve;
support::ulittle32_t SizeOfHeapCommit;
support::ulittle32_t LoaderFlags;
// FIXME: This should be NumberOfRvaAndSizes.
support::ulittle32_t NumberOfRvaAndSize;
};
/// The 64-bit PE header that follows the COFF header.
struct pe32plus_header {
support::ulittle16_t Magic;
uint8_t MajorLinkerVersion;
uint8_t MinorLinkerVersion;
support::ulittle32_t SizeOfCode;
support::ulittle32_t SizeOfInitializedData;
support::ulittle32_t SizeOfUninitializedData;
support::ulittle32_t AddressOfEntryPoint;
support::ulittle32_t BaseOfCode;
support::ulittle64_t ImageBase;
support::ulittle32_t SectionAlignment;
support::ulittle32_t FileAlignment;
support::ulittle16_t MajorOperatingSystemVersion;
support::ulittle16_t MinorOperatingSystemVersion;
support::ulittle16_t MajorImageVersion;
support::ulittle16_t MinorImageVersion;
support::ulittle16_t MajorSubsystemVersion;
support::ulittle16_t MinorSubsystemVersion;
support::ulittle32_t Win32VersionValue;
support::ulittle32_t SizeOfImage;
support::ulittle32_t SizeOfHeaders;
support::ulittle32_t CheckSum;
support::ulittle16_t Subsystem;
support::ulittle16_t DLLCharacteristics;
support::ulittle64_t SizeOfStackReserve;
support::ulittle64_t SizeOfStackCommit;
support::ulittle64_t SizeOfHeapReserve;
support::ulittle64_t SizeOfHeapCommit;
support::ulittle32_t LoaderFlags;
support::ulittle32_t NumberOfRvaAndSize;
};
struct data_directory {
support::ulittle32_t RelativeVirtualAddress;
support::ulittle32_t Size;
};
struct import_directory_table_entry {
support::ulittle32_t ImportLookupTableRVA;
support::ulittle32_t TimeDateStamp;
support::ulittle32_t ForwarderChain;
support::ulittle32_t NameRVA;
support::ulittle32_t ImportAddressTableRVA;
};
template <typename IntTy>
struct import_lookup_table_entry {
IntTy Data;
bool isOrdinal() const { return Data < 0; }
uint16_t getOrdinal() const {
assert(isOrdinal() && "ILT entry is not an ordinal!");
return Data & 0xFFFF;
}
uint32_t getHintNameRVA() const {
assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
return Data & 0xFFFFFFFF;
}
};
typedef import_lookup_table_entry<support::little32_t>
import_lookup_table_entry32;
typedef import_lookup_table_entry<support::little64_t>
import_lookup_table_entry64;
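// A minimal decoding sketch (hypothetical bit pattern): the most significant
// bit of an ILT entry selects import-by-ordinal over import-by-name.
//
//   import_lookup_table_entry32 E;
//   E.Data = static_cast<int32_t>(0x80000010); // MSB set, ordinal 16
//   if (E.isOrdinal())
//     outs() << "ordinal #" << E.getOrdinal() << "\n";
//   else
//     outs() << "hint/name RVA 0x" << utohexstr(E.getHintNameRVA()) << "\n";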
struct delay_import_directory_table_entry {
// dumpbin reports this field as "Characteristics" instead of "Attributes".
support::ulittle32_t Attributes;
support::ulittle32_t Name;
support::ulittle32_t ModuleHandle;
support::ulittle32_t DelayImportAddressTable;
support::ulittle32_t DelayImportNameTable;
support::ulittle32_t BoundDelayImportTable;
support::ulittle32_t UnloadDelayImportTable;
support::ulittle32_t TimeStamp;
};
struct export_directory_table_entry {
support::ulittle32_t ExportFlags;
support::ulittle32_t TimeDateStamp;
support::ulittle16_t MajorVersion;
support::ulittle16_t MinorVersion;
support::ulittle32_t NameRVA;
support::ulittle32_t OrdinalBase;
support::ulittle32_t AddressTableEntries;
support::ulittle32_t NumberOfNamePointers;
support::ulittle32_t ExportAddressTableRVA;
support::ulittle32_t NamePointerRVA;
support::ulittle32_t OrdinalTableRVA;
};
union export_address_table_entry {
support::ulittle32_t ExportRVA;
support::ulittle32_t ForwarderRVA;
};
typedef support::ulittle32_t export_name_pointer_table_entry;
typedef support::ulittle16_t export_ordinal_table_entry;
struct StringTableOffset {
support::ulittle32_t Zeroes;
support::ulittle32_t Offset;
};
template <typename SectionNumberType>
struct coff_symbol {
union {
char ShortName[COFF::NameSize];
StringTableOffset Offset;
} Name;
support::ulittle32_t Value;
SectionNumberType SectionNumber;
support::ulittle16_t Type;
uint8_t StorageClass;
uint8_t NumberOfAuxSymbols;
};
typedef coff_symbol<support::ulittle16_t> coff_symbol16;
typedef coff_symbol<support::ulittle32_t> coff_symbol32;
// Contains only common parts of coff_symbol16 and coff_symbol32.
struct coff_symbol_generic {
union {
char ShortName[COFF::NameSize];
StringTableOffset Offset;
} Name;
support::ulittle32_t Value;
};
class COFFSymbolRef {
public:
COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS), CS32(nullptr) {}
COFFSymbolRef(const coff_symbol32 *CS) : CS16(nullptr), CS32(CS) {}
COFFSymbolRef() : CS16(nullptr), CS32(nullptr) {}
const void *getRawPtr() const {
return CS16 ? static_cast<const void *>(CS16) : CS32;
}
const coff_symbol_generic *getGeneric() const {
if (CS16)
return reinterpret_cast<const coff_symbol_generic *>(CS16);
return reinterpret_cast<const coff_symbol_generic *>(CS32);
}
friend bool operator<(COFFSymbolRef A, COFFSymbolRef B) {
return A.getRawPtr() < B.getRawPtr();
}
bool isBigObj() const {
if (CS16)
return false;
if (CS32)
return true;
llvm_unreachable("COFFSymbolRef points to nothing!");
}
const char *getShortName() const {
return CS16 ? CS16->Name.ShortName : CS32->Name.ShortName;
}
const StringTableOffset &getStringTableOffset() const {
assert(isSet() && "COFFSymbolRef points to nothing!");
return CS16 ? CS16->Name.Offset : CS32->Name.Offset;
}
uint32_t getValue() const { return CS16 ? CS16->Value : CS32->Value; }
int32_t getSectionNumber() const {
assert(isSet() && "COFFSymbolRef points to nothing!");
if (CS16) {
// Reserved sections are returned as negative numbers.
if (CS16->SectionNumber <= COFF::MaxNumberOfSections16)
return CS16->SectionNumber;
return static_cast<int16_t>(CS16->SectionNumber);
}
return static_cast<int32_t>(CS32->SectionNumber);
}
uint16_t getType() const {
assert(isSet() && "COFFSymbolRef points to nothing!");
return CS16 ? CS16->Type : CS32->Type;
}
uint8_t getStorageClass() const {
assert(isSet() && "COFFSymbolRef points to nothing!");
return CS16 ? CS16->StorageClass : CS32->StorageClass;
}
uint8_t getNumberOfAuxSymbols() const {
assert(isSet() && "COFFSymbolRef points to nothing!");
return CS16 ? CS16->NumberOfAuxSymbols : CS32->NumberOfAuxSymbols;
}
uint8_t getBaseType() const { return getType() & 0x0F; }
uint8_t getComplexType() const {
return (getType() & 0xF0) >> COFF::SCT_COMPLEX_TYPE_SHIFT;
}
bool isAbsolute() const {
return getSectionNumber() == -1;
}
bool isExternal() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL;
}
bool isCommon() const {
return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
getValue() != 0;
}
bool isUndefined() const {
return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
getValue() == 0;
}
bool isWeakExternal() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
}
bool isFunctionDefinition() const {
return isExternal() && getBaseType() == COFF::IMAGE_SYM_TYPE_NULL &&
getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION &&
!COFF::isReservedSectionNumber(getSectionNumber());
}
bool isFunctionLineInfo() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_FUNCTION;
}
bool isAnyUndefined() const {
return isUndefined() || isWeakExternal();
}
bool isFileRecord() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_FILE;
}
bool isSection() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_SECTION;
}
bool isSectionDefinition() const {
// C++/CLI creates external ABS symbols for non-const appdomain globals.
// These are also followed by an auxiliary section definition.
bool isAppdomainGlobal =
getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
getSectionNumber() == COFF::IMAGE_SYM_ABSOLUTE;
bool isOrdinarySection = getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC;
if (!getNumberOfAuxSymbols())
return false;
return isAppdomainGlobal || isOrdinarySection;
}
bool isCLRToken() const {
return getStorageClass() == COFF::IMAGE_SYM_CLASS_CLR_TOKEN;
}
private:
bool isSet() const { return CS16 || CS32; }
const coff_symbol16 *CS16;
const coff_symbol32 *CS32;
};
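// A minimal classification sketch (hypothetical; `Sym` was obtained from a
// COFFObjectFile via getCOFFSymbol):
//
//   if (Sym.isUndefined())
//     outs() << "undefined external\n";
//   else if (Sym.isCommon())
//     outs() << "common symbol, size " << Sym.getValue() << "\n";
//   else if (Sym.isFunctionDefinition())
//     outs() << "function in section " << Sym.getSectionNumber() << "\n";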
struct coff_section {
char Name[COFF::NameSize];
support::ulittle32_t VirtualSize;
support::ulittle32_t VirtualAddress;
support::ulittle32_t SizeOfRawData;
support::ulittle32_t PointerToRawData;
support::ulittle32_t PointerToRelocations;
support::ulittle32_t PointerToLinenumbers;
support::ulittle16_t NumberOfRelocations;
support::ulittle16_t NumberOfLinenumbers;
support::ulittle32_t Characteristics;
  // Returns true if the actual number of relocations is stored in the
  // VirtualAddress field of the first relocation table entry.
bool hasExtendedRelocations() const {
return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) &&
NumberOfRelocations == UINT16_MAX;
}
};
struct coff_relocation {
support::ulittle32_t VirtualAddress;
support::ulittle32_t SymbolTableIndex;
support::ulittle16_t Type;
};
struct coff_aux_function_definition {
support::ulittle32_t TagIndex;
support::ulittle32_t TotalSize;
support::ulittle32_t PointerToLinenumber;
support::ulittle32_t PointerToNextFunction;
};
struct coff_aux_bf_and_ef_symbol {
char Unused1[4];
support::ulittle16_t Linenumber;
char Unused2[6];
support::ulittle32_t PointerToNextFunction;
};
struct coff_aux_weak_external {
support::ulittle32_t TagIndex;
support::ulittle32_t Characteristics;
};
struct coff_aux_section_definition {
support::ulittle32_t Length;
support::ulittle16_t NumberOfRelocations;
support::ulittle16_t NumberOfLinenumbers;
support::ulittle32_t CheckSum;
support::ulittle16_t NumberLowPart;
uint8_t Selection;
uint8_t Unused;
support::ulittle16_t NumberHighPart;
int32_t getNumber(bool IsBigObj) const {
uint32_t Number = static_cast<uint32_t>(NumberLowPart);
if (IsBigObj)
Number |= static_cast<uint32_t>(NumberHighPart) << 16;
return static_cast<int32_t>(Number);
}
};
struct coff_aux_clr_token {
uint8_t AuxType;
uint8_t Reserved;
support::ulittle32_t SymbolTableIndex;
};
struct coff_import_header {
support::ulittle16_t Sig1;
support::ulittle16_t Sig2;
support::ulittle16_t Version;
support::ulittle16_t Machine;
support::ulittle32_t TimeDateStamp;
support::ulittle32_t SizeOfData;
support::ulittle16_t OrdinalHint;
support::ulittle16_t TypeInfo;
int getType() const { return TypeInfo & 0x3; }
int getNameType() const { return (TypeInfo >> 2) & 0x7; }
};
struct coff_import_directory_table_entry {
support::ulittle32_t ImportLookupTableRVA;
support::ulittle32_t TimeDateStamp;
support::ulittle32_t ForwarderChain;
support::ulittle32_t NameRVA;
support::ulittle32_t ImportAddressTableRVA;
};
struct coff_load_configuration32 {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
support::ulittle16_t MajorVersion;
support::ulittle16_t MinorVersion;
support::ulittle32_t GlobalFlagsClear;
support::ulittle32_t GlobalFlagsSet;
support::ulittle32_t CriticalSectionDefaultTimeout;
support::ulittle32_t DeCommitFreeBlockThreshold;
support::ulittle32_t DeCommitTotalFreeThreshold;
support::ulittle32_t LockPrefixTable;
support::ulittle32_t MaximumAllocationSize;
support::ulittle32_t VirtualMemoryThreshold;
support::ulittle32_t ProcessAffinityMask;
support::ulittle32_t ProcessHeapFlags;
support::ulittle16_t CSDVersion;
support::ulittle16_t Reserved;
support::ulittle32_t EditList;
support::ulittle32_t SecurityCookie;
support::ulittle32_t SEHandlerTable;
support::ulittle32_t SEHandlerCount;
};
struct coff_load_configuration64 {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
support::ulittle16_t MajorVersion;
support::ulittle16_t MinorVersion;
support::ulittle32_t GlobalFlagsClear;
support::ulittle32_t GlobalFlagsSet;
support::ulittle32_t CriticalSectionDefaultTimeout;
support::ulittle32_t DeCommitFreeBlockThreshold;
support::ulittle32_t DeCommitTotalFreeThreshold;
support::ulittle32_t LockPrefixTable;
support::ulittle32_t MaximumAllocationSize;
support::ulittle32_t VirtualMemoryThreshold;
support::ulittle32_t ProcessAffinityMask;
support::ulittle32_t ProcessHeapFlags;
support::ulittle16_t CSDVersion;
support::ulittle16_t Reserved;
support::ulittle32_t EditList;
support::ulittle64_t SecurityCookie;
support::ulittle64_t SEHandlerTable;
support::ulittle64_t SEHandlerCount;
};
struct coff_runtime_function_x64 {
support::ulittle32_t BeginAddress;
support::ulittle32_t EndAddress;
support::ulittle32_t UnwindInformation;
};
struct coff_base_reloc_block_header {
support::ulittle32_t PageRVA;
support::ulittle32_t BlockSize;
};
struct coff_base_reloc_block_entry {
support::ulittle16_t Data;
int getType() const { return Data >> 12; }
int getOffset() const { return Data & ((1 << 12) - 1); }
};
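// A minimal decoding sketch (hypothetical values): each 16-bit entry packs a
// 4-bit relocation type and a 12-bit offset from the block's PageRVA.
//
//   coff_base_reloc_block_entry E;
//   E.Data = (COFF::IMAGE_REL_BASED_HIGHLOW << 12) | 0x123;
//   int Type = E.getType();     // COFF::IMAGE_REL_BASED_HIGHLOW
//   int Offset = E.getOffset(); // 0x123; add to the header's PageRVA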
class COFFObjectFile : public ObjectFile {
private:
friend class ImportDirectoryEntryRef;
friend class ExportDirectoryEntryRef;
const coff_file_header *COFFHeader;
const coff_bigobj_file_header *COFFBigObjHeader;
const pe32_header *PE32Header;
const pe32plus_header *PE32PlusHeader;
const data_directory *DataDirectory;
const coff_section *SectionTable;
const coff_symbol16 *SymbolTable16;
const coff_symbol32 *SymbolTable32;
const char *StringTable;
uint32_t StringTableSize;
const import_directory_table_entry *ImportDirectory;
uint32_t NumberOfImportDirectory;
const delay_import_directory_table_entry *DelayImportDirectory;
uint32_t NumberOfDelayImportDirectory;
const export_directory_table_entry *ExportDirectory;
const coff_base_reloc_block_header *BaseRelocHeader;
const coff_base_reloc_block_header *BaseRelocEnd;
std::error_code getString(uint32_t offset, StringRef &Res) const;
template <typename coff_symbol_type>
const coff_symbol_type *toSymb(DataRefImpl Symb) const;
const coff_section *toSec(DataRefImpl Sec) const;
const coff_relocation *toRel(DataRefImpl Rel) const;
std::error_code initSymbolTablePtr();
std::error_code initImportTablePtr();
std::error_code initDelayImportTablePtr();
std::error_code initExportTablePtr();
std::error_code initBaseRelocPtr();
public:
uintptr_t getSymbolTable() const {
if (SymbolTable16)
return reinterpret_cast<uintptr_t>(SymbolTable16);
if (SymbolTable32)
return reinterpret_cast<uintptr_t>(SymbolTable32);
return uintptr_t(0);
}
uint16_t getMachine() const {
if (COFFHeader)
return COFFHeader->Machine;
if (COFFBigObjHeader)
return COFFBigObjHeader->Machine;
llvm_unreachable("no COFF header!");
}
uint16_t getSizeOfOptionalHeader() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0
: COFFHeader->SizeOfOptionalHeader;
// bigobj doesn't have this field.
if (COFFBigObjHeader)
return 0;
llvm_unreachable("no COFF header!");
}
uint16_t getCharacteristics() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics;
    // bigobj doesn't have characteristics to speak of; editbin will silently
    // lie to you if you attempt to set any.
if (COFFBigObjHeader)
return 0;
llvm_unreachable("no COFF header!");
}
uint32_t getTimeDateStamp() const {
if (COFFHeader)
return COFFHeader->TimeDateStamp;
if (COFFBigObjHeader)
return COFFBigObjHeader->TimeDateStamp;
llvm_unreachable("no COFF header!");
}
uint32_t getNumberOfSections() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections;
if (COFFBigObjHeader)
return COFFBigObjHeader->NumberOfSections;
llvm_unreachable("no COFF header!");
}
uint32_t getPointerToSymbolTable() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0
: COFFHeader->PointerToSymbolTable;
if (COFFBigObjHeader)
return COFFBigObjHeader->PointerToSymbolTable;
llvm_unreachable("no COFF header!");
}
uint32_t getNumberOfSymbols() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
if (COFFBigObjHeader)
return COFFBigObjHeader->NumberOfSymbols;
llvm_unreachable("no COFF header!");
}
protected:
void moveSymbolNext(DataRefImpl &Symb) const override;
ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
std::error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const override;
void moveSectionNext(DataRefImpl &Sec) const override;
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAlignment(DataRefImpl Sec) const override;
bool isSectionText(DataRefImpl Sec) const override;
bool isSectionData(DataRefImpl Sec) const override;
bool isSectionBSS(DataRefImpl Sec) const override;
bool isSectionVirtual(DataRefImpl Sec) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
uint64_t getRelocationOffset(DataRefImpl Rel) const override;
symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
uint64_t getRelocationType(DataRefImpl Rel) const override;
void getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const override;
public:
COFFObjectFile(MemoryBufferRef Object, std::error_code &EC);
basic_symbol_iterator symbol_begin_impl() const override;
basic_symbol_iterator symbol_end_impl() const override;
section_iterator section_begin() const override;
section_iterator section_end() const override;
const coff_section *getCOFFSection(const SectionRef &Section) const;
COFFSymbolRef getCOFFSymbol(const DataRefImpl &Ref) const;
COFFSymbolRef getCOFFSymbol(const SymbolRef &Symbol) const;
const coff_relocation *getCOFFRelocation(const RelocationRef &Reloc) const;
unsigned getSectionID(SectionRef Sec) const;
unsigned getSymbolSectionID(SymbolRef Sym) const;
uint8_t getBytesInAddress() const override;
StringRef getFileFormatName() const override;
unsigned getArch() const override;
import_directory_iterator import_directory_begin() const;
import_directory_iterator import_directory_end() const;
delay_import_directory_iterator delay_import_directory_begin() const;
delay_import_directory_iterator delay_import_directory_end() const;
export_directory_iterator export_directory_begin() const;
export_directory_iterator export_directory_end() const;
base_reloc_iterator base_reloc_begin() const;
base_reloc_iterator base_reloc_end() const;
iterator_range<import_directory_iterator> import_directories() const;
iterator_range<delay_import_directory_iterator>
delay_import_directories() const;
iterator_range<export_directory_iterator> export_directories() const;
iterator_range<base_reloc_iterator> base_relocs() const;
const dos_header *getDOSHeader() const {
if (!PE32Header && !PE32PlusHeader)
return nullptr;
return reinterpret_cast<const dos_header *>(base());
}
std::error_code getPE32Header(const pe32_header *&Res) const;
std::error_code getPE32PlusHeader(const pe32plus_header *&Res) const;
std::error_code getDataDirectory(uint32_t index,
const data_directory *&Res) const;
std::error_code getSection(int32_t index, const coff_section *&Res) const;
template <typename coff_symbol_type>
std::error_code getSymbol(uint32_t Index,
const coff_symbol_type *&Res) const {
if (Index >= getNumberOfSymbols())
return object_error::parse_failed;
Res = reinterpret_cast<coff_symbol_type *>(getSymbolTable()) + Index;
return std::error_code();
}
ErrorOr<COFFSymbolRef> getSymbol(uint32_t index) const {
if (SymbolTable16) {
const coff_symbol16 *Symb = nullptr;
if (std::error_code EC = getSymbol(index, Symb))
return EC;
return COFFSymbolRef(Symb);
}
if (SymbolTable32) {
const coff_symbol32 *Symb = nullptr;
if (std::error_code EC = getSymbol(index, Symb))
return EC;
return COFFSymbolRef(Symb);
}
return object_error::parse_failed;
}
template <typename T>
std::error_code getAuxSymbol(uint32_t index, const T *&Res) const {
ErrorOr<COFFSymbolRef> s = getSymbol(index);
if (std::error_code EC = s.getError())
return EC;
Res = reinterpret_cast<const T *>(s->getRawPtr());
return std::error_code();
}
std::error_code getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const;
std::error_code getSymbolName(const coff_symbol_generic *Symbol,
StringRef &Res) const;
ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;
size_t getSymbolTableEntrySize() const {
if (COFFHeader)
return sizeof(coff_symbol16);
if (COFFBigObjHeader)
return sizeof(coff_symbol32);
llvm_unreachable("null symbol table pointer!");
}
iterator_range<const coff_relocation *>
getRelocations(const coff_section *Sec) const;
std::error_code getSectionName(const coff_section *Sec, StringRef &Res) const;
uint64_t getSectionSize(const coff_section *Sec) const;
std::error_code getSectionContents(const coff_section *Sec,
ArrayRef<uint8_t> &Res) const;
std::error_code getVaPtr(uint64_t VA, uintptr_t &Res) const;
std::error_code getRvaPtr(uint32_t Rva, uintptr_t &Res) const;
std::error_code getHintName(uint32_t Rva, uint16_t &Hint,
StringRef &Name) const;
bool isRelocatableObject() const override;
bool is64() const { return PE32PlusHeader; }
static inline bool classof(const Binary *v) { return v->isCOFF(); }
};
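// A minimal usage sketch (hypothetical; `Obj` is a COFFObjectFile created from
// a PE image): walk the import directory and print each DLL referenced.
//
//   for (const ImportDirectoryEntryRef &I : Obj.import_directories()) {
//     StringRef DLLName;
//     if (!I.getName(DLLName)) // std::error_code tests false on success
//       outs() << "imports from " << DLLName << "\n";
//   }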
// The iterator for the import directory table.
class ImportDirectoryEntryRef {
public:
ImportDirectoryEntryRef() : OwningObject(nullptr) {}
ImportDirectoryEntryRef(const import_directory_table_entry *Table, uint32_t I,
const COFFObjectFile *Owner)
: ImportTable(Table), Index(I), OwningObject(Owner) {}
bool operator==(const ImportDirectoryEntryRef &Other) const;
void moveNext();
imported_symbol_iterator imported_symbol_begin() const;
imported_symbol_iterator imported_symbol_end() const;
iterator_range<imported_symbol_iterator> imported_symbols() const;
std::error_code getName(StringRef &Result) const;
std::error_code getImportLookupTableRVA(uint32_t &Result) const;
std::error_code getImportAddressTableRVA(uint32_t &Result) const;
std::error_code
getImportTableEntry(const import_directory_table_entry *&Result) const;
std::error_code
getImportLookupEntry(const import_lookup_table_entry32 *&Result) const;
private:
const import_directory_table_entry *ImportTable;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
class DelayImportDirectoryEntryRef {
public:
DelayImportDirectoryEntryRef() : OwningObject(nullptr) {}
DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T,
uint32_t I, const COFFObjectFile *Owner)
: Table(T), Index(I), OwningObject(Owner) {}
bool operator==(const DelayImportDirectoryEntryRef &Other) const;
void moveNext();
imported_symbol_iterator imported_symbol_begin() const;
imported_symbol_iterator imported_symbol_end() const;
iterator_range<imported_symbol_iterator> imported_symbols() const;
std::error_code getName(StringRef &Result) const;
std::error_code getDelayImportTable(
const delay_import_directory_table_entry *&Result) const;
std::error_code getImportAddress(int AddrIndex, uint64_t &Result) const;
private:
const delay_import_directory_table_entry *Table;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
// The iterator for the export directory table entry.
class ExportDirectoryEntryRef {
public:
ExportDirectoryEntryRef() : OwningObject(nullptr) {}
ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
const COFFObjectFile *Owner)
: ExportTable(Table), Index(I), OwningObject(Owner) {}
bool operator==(const ExportDirectoryEntryRef &Other) const;
void moveNext();
std::error_code getDllName(StringRef &Result) const;
std::error_code getOrdinalBase(uint32_t &Result) const;
std::error_code getOrdinal(uint32_t &Result) const;
std::error_code getExportRVA(uint32_t &Result) const;
std::error_code getSymbolName(StringRef &Result) const;
private:
const export_directory_table_entry *ExportTable;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
class ImportedSymbolRef {
public:
ImportedSymbolRef() : OwningObject(nullptr) {}
ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I,
const COFFObjectFile *Owner)
: Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {}
ImportedSymbolRef(const import_lookup_table_entry64 *Entry, uint32_t I,
const COFFObjectFile *Owner)
: Entry32(nullptr), Entry64(Entry), Index(I), OwningObject(Owner) {}
bool operator==(const ImportedSymbolRef &Other) const;
void moveNext();
std::error_code getSymbolName(StringRef &Result) const;
std::error_code getOrdinal(uint16_t &Result) const;
private:
const import_lookup_table_entry32 *Entry32;
const import_lookup_table_entry64 *Entry64;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
class BaseRelocRef {
public:
BaseRelocRef() : OwningObject(nullptr) {}
BaseRelocRef(const coff_base_reloc_block_header *Header,
const COFFObjectFile *Owner)
: Header(Header), Index(0), OwningObject(Owner) {}
bool operator==(const BaseRelocRef &Other) const;
void moveNext();
std::error_code getType(uint8_t &Type) const;
std::error_code getRVA(uint32_t &Result) const;
private:
const coff_base_reloc_block_header *Header;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
} // end namespace object
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/Error.h | //===- Error.h - system_error extensions for Object -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This declares a new error_category for the Object library.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ERROR_H
#define LLVM_OBJECT_ERROR_H
#include <system_error>
namespace llvm {
namespace object {
const std::error_category &object_category();
enum class object_error {
// Error code 0 is absent. Use std::error_code() instead.
arch_not_found = 1,
invalid_file_type,
parse_failed,
unexpected_eof,
string_table_non_null_end,
invalid_section_index,
bitcode_section_not_found,
macho_small_load_command,
macho_load_segment_too_many_sections,
macho_load_segment_too_small,
};
inline std::error_code make_error_code(object_error e) {
return std::error_code(static_cast<int>(e), object_category());
}
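// A minimal usage sketch: thanks to the is_error_code_enum specialization
// below, an object_error converts implicitly to std::error_code.
//
//   std::error_code EC = object_error::parse_failed;
//   if (EC == object_error::parse_failed)
//     errs() << "object error: " << EC.message() << "\n";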
} // end namespace object.
} // end namespace llvm.
namespace std {
template <>
struct is_error_code_enum<llvm::object::object_error> : std::true_type {};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/ELFYAML.h | //===- ELFYAML.h - ELF YAMLIO implementation --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file declares classes for handling the YAML representation
/// of ELF.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_ELFYAML_H
#define LLVM_OBJECT_ELFYAML_H
#include "llvm/MC/YAML.h"
#include "llvm/Support/ELF.h"
namespace llvm {
namespace ELFYAML {
// These types are invariant across 32/64-bit ELF, so for simplicity just
// directly give them their exact sizes. We don't need to worry about
// endianness because these are just the types in the YAMLIO structures,
// and are appropriately converted to the necessary endianness when
// reading/generating binary object files.
// The naming of these types is intended to be ELF_PREFIX, where PREFIX is
// the common prefix of the respective constants. E.g. ELF_EM corresponds
// to the `e_machine` constants, like `EM_X86_64`.
// In the future, these would probably be better suited by C++11 enum
// classes with an appropriate fixed underlying type.
LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_ET)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_EM)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFCLASS)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFDATA)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFOSABI)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_EF)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_SHT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_REL)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_RSS)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_SHF)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STT)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STV)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STO)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_ISA)
// For now, hardcode 64 bits everywhere that 32 or 64 would be needed
// since 64-bit can hold 32-bit values too.
struct FileHeader {
ELF_ELFCLASS Class;
ELF_ELFDATA Data;
ELF_ELFOSABI OSABI;
ELF_ET Type;
ELF_EM Machine;
ELF_EF Flags;
llvm::yaml::Hex64 Entry;
};
struct Symbol {
StringRef Name;
ELF_STT Type;
StringRef Section;
llvm::yaml::Hex64 Value;
llvm::yaml::Hex64 Size;
uint8_t Other;
};
struct LocalGlobalWeakSymbols {
std::vector<Symbol> Local;
std::vector<Symbol> Global;
std::vector<Symbol> Weak;
};
struct SectionOrType {
StringRef sectionNameOrType;
};
struct Section {
enum class SectionKind {
Group,
RawContent,
Relocation,
NoBits,
MipsABIFlags
};
SectionKind Kind;
StringRef Name;
ELF_SHT Type;
ELF_SHF Flags;
llvm::yaml::Hex64 Address;
StringRef Link;
StringRef Info;
llvm::yaml::Hex64 AddressAlign;
Section(SectionKind Kind) : Kind(Kind) {}
virtual ~Section();
};
struct RawContentSection : Section {
yaml::BinaryRef Content;
llvm::yaml::Hex64 Size;
RawContentSection() : Section(SectionKind::RawContent) {}
static bool classof(const Section *S) {
return S->Kind == SectionKind::RawContent;
}
};
struct NoBitsSection : Section {
llvm::yaml::Hex64 Size;
NoBitsSection() : Section(SectionKind::NoBits) {}
static bool classof(const Section *S) {
return S->Kind == SectionKind::NoBits;
}
};
struct Group : Section {
// Members of a group contain a flag and a list of section indices
// that are part of the group.
std::vector<SectionOrType> Members;
Group() : Section(SectionKind::Group) {}
static bool classof(const Section *S) {
return S->Kind == SectionKind::Group;
}
};
struct Relocation {
llvm::yaml::Hex64 Offset;
int64_t Addend;
ELF_REL Type;
StringRef Symbol;
};
struct RelocationSection : Section {
std::vector<Relocation> Relocations;
RelocationSection() : Section(SectionKind::Relocation) {}
static bool classof(const Section *S) {
return S->Kind == SectionKind::Relocation;
}
};
// Represents .MIPS.abiflags section
struct MipsABIFlags : Section {
llvm::yaml::Hex16 Version;
MIPS_ISA ISALevel;
llvm::yaml::Hex8 ISARevision;
MIPS_AFL_REG GPRSize;
MIPS_AFL_REG CPR1Size;
MIPS_AFL_REG CPR2Size;
MIPS_ABI_FP FpABI;
MIPS_AFL_EXT ISAExtension;
MIPS_AFL_ASE ASEs;
MIPS_AFL_FLAGS1 Flags1;
llvm::yaml::Hex32 Flags2;
MipsABIFlags() : Section(SectionKind::MipsABIFlags) {}
static bool classof(const Section *S) {
return S->Kind == SectionKind::MipsABIFlags;
}
};
struct Object {
FileHeader Header;
std::vector<std::unique_ptr<Section>> Sections;
// Although in reality the symbols reside in a section, it is a lot
// cleaner and nicer if we read them from the YAML as a separate
// top-level key, which automatically ensures that invariants like there
// being a single SHT_SYMTAB section are upheld.
LocalGlobalWeakSymbols Symbols;
};
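// A minimal input sketch (hypothetical section values) of the YAML these
// mappings accept, as consumed by tools like yaml2obj:
//
//   --- !ELF
//   FileHeader:
//     Class:   ELFCLASS64
//     Data:    ELFDATA2LSB
//     Type:    ET_REL
//     Machine: EM_X86_64
//   Sections:
//     - Name:  .text
//       Type:  SHT_PROGBITS
//       Flags: [ SHF_ALLOC, SHF_EXECINSTR ]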
} // end namespace ELFYAML
} // end namespace llvm
LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::ELFYAML::Section>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionOrType)
namespace llvm {
namespace yaml {
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ET> {
static void enumeration(IO &IO, ELFYAML::ELF_ET &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_EM> {
static void enumeration(IO &IO, ELFYAML::ELF_EM &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS> {
static void enumeration(IO &IO, ELFYAML::ELF_ELFCLASS &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA> {
static void enumeration(IO &IO, ELFYAML::ELF_ELFDATA &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI> {
static void enumeration(IO &IO, ELFYAML::ELF_ELFOSABI &Value);
};
template <>
struct ScalarBitSetTraits<ELFYAML::ELF_EF> {
static void bitset(IO &IO, ELFYAML::ELF_EF &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_SHT> {
static void enumeration(IO &IO, ELFYAML::ELF_SHT &Value);
};
template <>
struct ScalarBitSetTraits<ELFYAML::ELF_SHF> {
static void bitset(IO &IO, ELFYAML::ELF_SHF &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_STT> {
static void enumeration(IO &IO, ELFYAML::ELF_STT &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_STV> {
static void enumeration(IO &IO, ELFYAML::ELF_STV &Value);
};
template <>
struct ScalarBitSetTraits<ELFYAML::ELF_STO> {
static void bitset(IO &IO, ELFYAML::ELF_STO &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_REL> {
static void enumeration(IO &IO, ELFYAML::ELF_REL &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_RSS> {
static void enumeration(IO &IO, ELFYAML::ELF_RSS &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG> {
static void enumeration(IO &IO, ELFYAML::MIPS_AFL_REG &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP> {
static void enumeration(IO &IO, ELFYAML::MIPS_ABI_FP &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT> {
static void enumeration(IO &IO, ELFYAML::MIPS_AFL_EXT &Value);
};
template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_ISA> {
static void enumeration(IO &IO, ELFYAML::MIPS_ISA &Value);
};
template <>
struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE> {
static void bitset(IO &IO, ELFYAML::MIPS_AFL_ASE &Value);
};
template <>
struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1> {
static void bitset(IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value);
};
template <>
struct MappingTraits<ELFYAML::FileHeader> {
static void mapping(IO &IO, ELFYAML::FileHeader &FileHdr);
};
template <>
struct MappingTraits<ELFYAML::Symbol> {
static void mapping(IO &IO, ELFYAML::Symbol &Symbol);
};
template <>
struct MappingTraits<ELFYAML::LocalGlobalWeakSymbols> {
static void mapping(IO &IO, ELFYAML::LocalGlobalWeakSymbols &Symbols);
};
template <> struct MappingTraits<ELFYAML::Relocation> {
static void mapping(IO &IO, ELFYAML::Relocation &Rel);
};
template <>
struct MappingTraits<std::unique_ptr<ELFYAML::Section>> {
static void mapping(IO &IO, std::unique_ptr<ELFYAML::Section> &Section);
static StringRef validate(IO &io, std::unique_ptr<ELFYAML::Section> &Section);
};
template <>
struct MappingTraits<ELFYAML::Object> {
static void mapping(IO &IO, ELFYAML::Object &Object);
};
template <> struct MappingTraits<ELFYAML::SectionOrType> {
  static void mapping(IO &IO, ELFYAML::SectionOrType &sectionOrType);
};
} // end namespace yaml
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/IRObjectFile.h | //===- IRObjectFile.h - LLVM IR object file implementation ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the IRObjectFile template class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_IROBJECTFILE_H
#define LLVM_OBJECT_IROBJECTFILE_H
#include "llvm/Object/SymbolicFile.h"
namespace llvm {
class Mangler;
class Module;
class GlobalValue;
namespace object {
class ObjectFile;
class IRObjectFile : public SymbolicFile {
std::unique_ptr<Module> M;
std::unique_ptr<Mangler> Mang;
std::vector<std::pair<std::string, uint32_t>> AsmSymbols;
public:
IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> M);
~IRObjectFile() override;
void moveSymbolNext(DataRefImpl &Symb) const override;
std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
GlobalValue *getSymbolGV(DataRefImpl Symb);
const GlobalValue *getSymbolGV(DataRefImpl Symb) const {
return const_cast<IRObjectFile *>(this)->getSymbolGV(Symb);
}
basic_symbol_iterator symbol_begin_impl() const override;
basic_symbol_iterator symbol_end_impl() const override;
const Module &getModule() const {
return const_cast<IRObjectFile*>(this)->getModule();
}
Module &getModule() {
return *M;
}
std::unique_ptr<Module> takeModule();
static inline bool classof(const Binary *v) {
return v->isIR();
}
/// \brief Finds and returns bitcode embedded in the given object file, or an
/// error code if not found.
static ErrorOr<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
/// \brief Finds and returns bitcode in the given memory buffer (which may
/// be either a bitcode file or a native object file with embedded bitcode),
/// or an error code if not found.
static ErrorOr<MemoryBufferRef>
findBitcodeInMemBuffer(MemoryBufferRef Object);
static ErrorOr<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
LLVMContext &Context);
};
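// A minimal usage sketch (assumes a hypothetical MemoryBufferRef Buf):
//
//   ErrorOr<MemoryBufferRef> BCOrErr =
//       IRObjectFile::findBitcodeInMemBuffer(Buf);
//   if (std::error_code EC = BCOrErr.getError())
//     ...; // no bitcode: plain native object or unrecognized format
//   else
//     ...; // *BCOrErr is the raw bitcode, ready for the bitcode reader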
}
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/SelectionDAGNodes.h | //===-- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SDNode class and derived classes, which are used to
// represent the nodes and operations present in a SelectionDAG. These nodes
// and operations are machine code level operations, with some similarities to
// the GCC RTL representation.
//
// Clients should include the SelectionDAG.h file instead of this file directly.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
#define LLVM_CODEGEN_SELECTIONDAGNODES_H
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
namespace llvm {
class SelectionDAG;
class GlobalValue;
class MachineBasicBlock;
class MachineConstantPoolValue;
class SDNode;
class Value;
class MCSymbol;
template <typename T> struct DenseMapInfo;
template <typename T> struct simplify_type;
template <typename T> struct ilist_traits;
void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
bool force = false);
/// This represents a list of ValueType's that has been intern'd by
/// a SelectionDAG. Instances of this simple value class are returned by
/// SelectionDAG::getVTList(...).
///
struct SDVTList {
const EVT *VTs;
unsigned int NumVTs;
};
namespace ISD {
/// Node predicates
/// Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const SDNode *N);
/// Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const SDNode *N);
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool isBuildVectorOfConstantSDNodes(const SDNode *N);
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantFPSDNode or undef.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
/// Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool isScalarToVector(const SDNode *N);
/// Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);
} // end namespace ISD
//===----------------------------------------------------------------------===//
/// Unlike LLVM values, Selection DAG nodes may return multiple
/// values as the result of a computation. Many nodes return multiple values,
/// from loads (which define a token and a return value) to ADDC (which returns
/// a result and a carry value), to calls (which may return an arbitrary number
/// of values).
///
/// As such, each use of a SelectionDAG computation must indicate the node that
/// computes it as well as which return value to use from that node. This pair
/// of information is represented with the SDValue value type.
///
class SDValue {
friend struct DenseMapInfo<SDValue>;
SDNode *Node; // The node defining the value we are using.
unsigned ResNo; // Which return value of the node we are using.
public:
SDValue() : Node(nullptr), ResNo(0) {}
SDValue(SDNode *node, unsigned resno);
/// get the index which selects a specific result in the SDNode
unsigned getResNo() const { return ResNo; }
/// get the SDNode which holds the desired result
SDNode *getNode() const { return Node; }
/// set the SDNode
void setNode(SDNode *N) { Node = N; }
inline SDNode *operator->() const { return Node; }
bool operator==(const SDValue &O) const {
return Node == O.Node && ResNo == O.ResNo;
}
bool operator!=(const SDValue &O) const {
return !operator==(O);
}
bool operator<(const SDValue &O) const {
return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
}
explicit operator bool() const {
return Node != nullptr;
}
SDValue getValue(unsigned R) const {
return SDValue(Node, R);
}
// Return true if this node is an operand of N.
bool isOperandOf(const SDNode *N) const;
/// Return the ValueType of the referenced return value.
inline EVT getValueType() const;
/// Return the simple ValueType of the referenced return value.
MVT getSimpleValueType() const {
return getValueType().getSimpleVT();
}
/// Returns the size of the value in bits.
unsigned getValueSizeInBits() const {
return getValueType().getSizeInBits();
}
unsigned getScalarValueSizeInBits() const {
return getValueType().getScalarType().getSizeInBits();
}
// Forwarding methods - These forward to the corresponding methods in SDNode.
inline unsigned getOpcode() const;
inline unsigned getNumOperands() const;
inline const SDValue &getOperand(unsigned i) const;
inline uint64_t getConstantOperandVal(unsigned i) const;
inline bool isTargetMemoryOpcode() const;
inline bool isTargetOpcode() const;
inline bool isMachineOpcode() const;
inline unsigned getMachineOpcode() const;
inline const DebugLoc &getDebugLoc() const;
inline void dump() const;
inline void dumpr() const;
/// Return true if this operand (which must be a chain) reaches the
/// specified operand without crossing any side-effecting instructions.
/// In practice, this looks through token factors and non-volatile loads.
  /// To remain efficient, this only looks a couple of nodes in;
  /// it does not do an exhaustive search.
bool reachesChainWithoutSideEffects(SDValue Dest,
unsigned Depth = 2) const;
/// Return true if there are no nodes using value ResNo of Node.
inline bool use_empty() const;
/// Return true if there is exactly one node using value ResNo of Node.
inline bool hasOneUse() const;
};
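// A minimal sketch of the (node, result number) pairing (assumes a
// hypothetical SDNode *N defining at least two results):
//
//   SDValue V0(N, 0);            // first result of N
//   SDValue V1 = V0.getValue(1); // second result of the same node
//   assert(V0.getNode() == V1.getNode() && V1.getResNo() == 1);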
template<> struct DenseMapInfo<SDValue> {
static inline SDValue getEmptyKey() {
SDValue V;
V.ResNo = -1U;
return V;
}
static inline SDValue getTombstoneKey() {
SDValue V;
V.ResNo = -2U;
return V;
}
static unsigned getHashValue(const SDValue &Val) {
return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
(unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
}
static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
return LHS == RHS;
}
};
template <> struct isPodLike<SDValue> { static const bool value = true; };
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
typedef SDNode* SimpleType;
static SimpleType getSimplifiedValue(SDValue &Val) {
return Val.getNode();
}
};
template<> struct simplify_type<const SDValue> {
typedef /*const*/ SDNode* SimpleType;
static SimpleType getSimplifiedValue(const SDValue &Val) {
return Val.getNode();
}
};
/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
class SDUse {
/// Val - The value being used.
SDValue Val;
/// User - The user of this value.
SDNode *User;
/// Prev, Next - Pointers to the uses list of the SDNode referred by
/// this operand.
SDUse **Prev, *Next;
SDUse(const SDUse &U) = delete;
void operator=(const SDUse &U) = delete;
public:
SDUse() : Val(), User(nullptr), Prev(nullptr), Next(nullptr) {}
/// Normally SDUse will just implicitly convert to an SDValue that it holds.
operator const SDValue&() const { return Val; }
/// If implicit conversion to SDValue doesn't work, the get() method returns
/// the SDValue.
const SDValue &get() const { return Val; }
/// This returns the SDNode that contains this Use.
SDNode *getUser() { return User; }
/// Get the next SDUse in the use list.
SDUse *getNext() const { return Next; }
/// Convenience function for get().getNode().
SDNode *getNode() const { return Val.getNode(); }
/// Convenience function for get().getResNo().
unsigned getResNo() const { return Val.getResNo(); }
/// Convenience function for get().getValueType().
EVT getValueType() const { return Val.getValueType(); }
/// Convenience function for get().operator==
bool operator==(const SDValue &V) const {
return Val == V;
}
/// Convenience function for get().operator!=
bool operator!=(const SDValue &V) const {
return Val != V;
}
/// Convenience function for get().operator<
bool operator<(const SDValue &V) const {
return Val < V;
}
private:
friend class SelectionDAG;
friend class SDNode;
void setUser(SDNode *p) { User = p; }
/// Remove this use from its existing use list, assign it the
/// given value, and add it to the new value's node's use list.
inline void set(const SDValue &V);
/// Like set, but only supports initializing a newly-allocated
/// SDUse with a non-null value.
inline void setInitial(const SDValue &V);
/// Like set, but only sets the Node portion of the value,
/// leaving the ResNo portion unmodified.
inline void setNode(SDNode *N);
void addToList(SDUse **List) {
Next = *List;
if (Next) Next->Prev = &Next;
Prev = List;
*List = this;
}
void removeFromList() {
*Prev = Next;
if (Next) Next->Prev = Prev;
}
};
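// Note on the representation: SDUse objects form the intrusive, doubly linked
// use list headed by SDNode::UseList. Prev points at the previous link's Next
// field (or at the head pointer itself), which is what lets removeFromList()
// unlink in O(1) without knowing the list head. A raw walk, assuming a
// hypothetical SDUse *Head:
//
//   for (SDUse *U = Head; U; U = U->getNext())
//     ...; // each U records one (user node, operand slot) pair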
/// simplify_type specializations - Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
typedef SDNode* SimpleType;
static SimpleType getSimplifiedValue(SDUse &Val) {
return Val.getNode();
}
};
/// Represents one node in the SelectionDAG.
///
class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
private:
/// The operation that this node performs.
int16_t NodeType;
/// This is true if OperandList was new[]'d. If true,
/// then they will be delete[]'d when the node is destroyed.
uint16_t OperandsNeedDelete : 1;
/// This tracks whether this node has one or more dbg_value
/// nodes corresponding to it.
uint16_t HasDebugValue : 1;
protected:
/// This member is defined by this class, but is not used for
/// anything. Subclasses can use it to hold whatever state they find useful.
/// This field is initialized to zero by the ctor.
uint16_t SubclassData : 14;
private:
/// Unique id per SDNode in the DAG.
int NodeId;
/// The values that are used by this operation.
SDUse *OperandList;
/// The types of the values this node defines. SDNode's may
/// define multiple values simultaneously.
const EVT *ValueList;
/// List of uses for this SDNode.
SDUse *UseList;
/// The number of entries in the Operand/Value list.
unsigned short NumOperands, NumValues;
// The ordering of the SDNodes. It roughly corresponds to the ordering of the
// original LLVM instructions.
// This is used for turning off scheduling, because we'll forgo
// the normal scheduling algorithms and output the instructions according to
// this ordering.
unsigned IROrder;
/// Source line information.
DebugLoc debugLoc;
/// Return a pointer to the specified value type.
static const EVT *getValueTypeList(EVT VT);
friend class SelectionDAG;
friend struct ilist_traits<SDNode>;
public:
//===--------------------------------------------------------------------===//
// Accessors
//
/// Return the SelectionDAG opcode value for this node. For
/// pre-isel nodes (those for which isMachineOpcode returns false), these
/// are the opcode values in the ISD and <target>ISD namespaces. For
/// post-isel opcodes, see getMachineOpcode.
unsigned getOpcode() const { return (unsigned short)NodeType; }
/// Test if this node has a target-specific opcode (in the
/// \<target\>ISD namespace).
bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
/// Test if this node has a target-specific
/// memory-referencing opcode (in the \<target\>ISD namespace and
/// greater than FIRST_TARGET_MEMORY_OPCODE).
bool isTargetMemoryOpcode() const {
return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
}
/// Test if this node is a memory intrinsic (with valid pointer information).
/// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
/// non-memory intrinsics (with chains) that are not really instances of
/// MemSDNode. For such nodes, we need some extra state to determine the
/// proper classof relationship.
bool isMemIntrinsic() const {
return (NodeType == ISD::INTRINSIC_W_CHAIN ||
NodeType == ISD::INTRINSIC_VOID) && ((SubclassData >> 13) & 1);
}
/// Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; }
/// This may only be called if isMachineOpcode returns
/// true. It returns the MachineInstr opcode value that the node's opcode
/// corresponds to.
unsigned getMachineOpcode() const {
assert(isMachineOpcode() && "Not a MachineInstr opcode!");
return ~NodeType;
}
/// Get this bit.
bool getHasDebugValue() const { return HasDebugValue; }
/// Set this bit.
void setHasDebugValue(bool b) { HasDebugValue = b; }
/// Return true if there are no uses of this node.
bool use_empty() const { return UseList == nullptr; }
/// Return true if there is exactly one use of this node.
bool hasOneUse() const {
return !use_empty() && std::next(use_begin()) == use_end();
}
/// Return the number of uses of this node. This method takes
/// time proportional to the number of uses.
size_t use_size() const { return std::distance(use_begin(), use_end()); }
/// Return the unique node id.
int getNodeId() const { return NodeId; }
/// Set unique node id.
void setNodeId(int Id) { NodeId = Id; }
/// Return the node ordering.
unsigned getIROrder() const { return IROrder; }
/// Set the node ordering.
void setIROrder(unsigned Order) { IROrder = Order; }
/// Return the source location info.
const DebugLoc &getDebugLoc() const { return debugLoc; }
/// Set source location info. Try to avoid this, putting
/// it in the constructor is preferable.
void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
/// This class provides iterator support for SDUse
/// operands that use a specific SDNode.
class use_iterator {
SDUse *Op;
explicit use_iterator(SDUse *op) : Op(op) {
}
friend class SDNode;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = SDUse;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
use_iterator(const use_iterator &I) : Op(I.Op) {}
use_iterator() : Op(nullptr) {}
bool operator==(const use_iterator &x) const {
return Op == x.Op;
}
bool operator!=(const use_iterator &x) const {
return !operator==(x);
}
/// Return true if this iterator is at the end of uses list.
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only.
use_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
Op = Op->getNext();
return *this;
}
use_iterator operator++(int) { // Postincrement
use_iterator tmp = *this; ++*this; return tmp;
}
/// Retrieve a pointer to the current user node.
SDNode *operator*() const {
assert(Op && "Cannot dereference end iterator!");
return Op->getUser();
}
SDNode *operator->() const { return operator*(); }
SDUse &getUse() const { return *Op; }
/// Retrieve the operand # of this use in its user.
unsigned getOperandNo() const {
assert(Op && "Cannot dereference end iterator!");
return (unsigned)(Op - Op->getUser()->OperandList);
}
};
/// Provide iteration support to walk over all uses of an SDNode.
use_iterator use_begin() const {
return use_iterator(UseList);
}
static use_iterator use_end() { return use_iterator(nullptr); }
inline iterator_range<use_iterator> uses() {
return iterator_range<use_iterator>(use_begin(), use_end());
}
inline iterator_range<use_iterator> uses() const {
return iterator_range<use_iterator>(use_begin(), use_end());
}
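  // A minimal sketch of iterating all users (assumes a hypothetical
  // SDNode *N):
  //
  //   for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
  //        UI != UE; ++UI)
  //     ...; // *UI is the using node; UI.getOperandNo() gives the slot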
/// Return true if there are exactly NUSES uses of the indicated value.
/// This method ignores uses of other values defined by this operation.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
/// Return true if there are any use of the indicated value.
/// This method ignores uses of other values defined by this operation.
bool hasAnyUseOfValue(unsigned Value) const;
/// Return true if this node is the only use of N.
bool isOnlyUserOf(const SDNode *N) const;
/// Return true if this node is an operand of N.
bool isOperandOf(const SDNode *N) const;
/// Return true if this node is a predecessor of N.
/// NOTE: Implemented on top of hasPredecessor and every bit as
/// expensive. Use carefully.
bool isPredecessorOf(const SDNode *N) const {
return N->hasPredecessor(this);
}
/// Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool hasPredecessor(const SDNode *N) const;
/// Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// In this helper the Visited and worklist sets are held externally to
/// cache predecessors over multiple invocations. If you want to test for
/// multiple predecessors this method is preferable to multiple calls to
/// hasPredecessor. Be sure to clear Visited and Worklist if the DAG
/// changes.
/// NOTE: This is still very expensive. Use carefully.
bool hasPredecessorHelper(const SDNode *N,
SmallPtrSetImpl<const SDNode *> &Visited,
SmallVectorImpl<const SDNode *> &Worklist) const;
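  // A minimal sketch of the cached form (A and B are hypothetical nodes):
  //
  //   SmallPtrSet<const SDNode *, 32> Visited;
  //   SmallVector<const SDNode *, 16> Worklist;
  //   bool HasA = N->hasPredecessorHelper(A, Visited, Worklist);
  //   bool HasB = N->hasPredecessorHelper(B, Visited, Worklist); // reuses cache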
/// Return the number of values used by this operation.
unsigned getNumOperands() const { return NumOperands; }
/// Helper method returns the integer value of a ConstantSDNode operand.
uint64_t getConstantOperandVal(unsigned Num) const;
const SDValue &getOperand(unsigned Num) const {
assert(Num < NumOperands && "Invalid child # of SDNode!");
return OperandList[Num];
}
typedef SDUse* op_iterator;
op_iterator op_begin() const { return OperandList; }
op_iterator op_end() const { return OperandList+NumOperands; }
ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
/// Iterator for directly iterating over the operand SDValue's.
struct value_op_iterator
: iterator_adaptor_base<value_op_iterator, op_iterator,
std::random_access_iterator_tag, SDValue,
ptrdiff_t, value_op_iterator *,
value_op_iterator *> {
explicit value_op_iterator(SDUse *U = nullptr)
: iterator_adaptor_base(U) {}
const SDValue &operator*() const { return I->get(); }
};
iterator_range<value_op_iterator> op_values() const {
return iterator_range<value_op_iterator>(value_op_iterator(op_begin()),
value_op_iterator(op_end()));
}
SDVTList getVTList() const {
SDVTList X = { ValueList, NumValues };
return X;
}
/// If this node has a glue operand, return the node
/// to which the glue operand points. Otherwise return NULL.
SDNode *getGluedNode() const {
if (getNumOperands() != 0 &&
getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
return getOperand(getNumOperands()-1).getNode();
return nullptr;
}
// If this is a pseudo op, like copyfromreg, look to see if there is a
// real target node glued to it. If so, return the target node.
const SDNode *getGluedMachineNode() const {
const SDNode *FoundNode = this;
// Climb up glue edges until a machine-opcode node is found, or the
// end of the chain is reached.
while (!FoundNode->isMachineOpcode()) {
const SDNode *N = FoundNode->getGluedNode();
if (!N) break;
FoundNode = N;
}
return FoundNode;
}
/// If this node has a glue value with a user, return
/// the user (there is at most one). Otherwise return NULL.
SDNode *getGluedUser() const {
for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
if (UI.getUse().get().getValueType() == MVT::Glue)
return *UI;
return nullptr;
}
/// Return the number of values defined/returned by this operator.
unsigned getNumValues() const { return NumValues; }
/// Return the type of a specified result.
EVT getValueType(unsigned ResNo) const {
assert(ResNo < NumValues && "Illegal result number!");
return ValueList[ResNo];
}
/// Return the type of a specified result as a simple type.
MVT getSimpleValueType(unsigned ResNo) const {
return getValueType(ResNo).getSimpleVT();
}
/// Returns MVT::getSizeInBits(getValueType(ResNo)).
unsigned getValueSizeInBits(unsigned ResNo) const {
return getValueType(ResNo).getSizeInBits();
}
typedef const EVT* value_iterator;
value_iterator value_begin() const { return ValueList; }
value_iterator value_end() const { return ValueList+NumValues; }
/// Return the opcode of this operation for printing.
std::string getOperationName(const SelectionDAG *G = nullptr) const;
static const char* getIndexedModeName(ISD::MemIndexedMode AM);
void print_types(raw_ostream &OS, const SelectionDAG *G) const;
void print_details(raw_ostream &OS, const SelectionDAG *G) const;
void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
/// Print a SelectionDAG node and all children down to
/// the leaves. The given SelectionDAG allows target-specific nodes
/// to be printed in human-readable form. Unlike printr, this will
/// print the whole DAG, including children that appear multiple
/// times.
///
void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
/// Print a SelectionDAG node and children up to
/// depth "depth." The given SelectionDAG allows target-specific
/// nodes to be printed in human-readable form. Unlike printr, this
/// will print children that appear multiple times wherever they are
/// used.
///
void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
/// Dump this node, for debugging.
void dump() const;
/// Dump (recursively) this node and its use-def subgraph.
void dumpr() const;
/// Dump this node, for debugging.
/// The given SelectionDAG allows target-specific nodes to be printed
/// in human-readable form.
void dump(const SelectionDAG *G) const;
/// Dump (recursively) this node and its use-def subgraph.
/// The given SelectionDAG allows target-specific nodes to be printed
/// in human-readable form.
void dumpr(const SelectionDAG *G) const;
/// printrFull to dbgs(). The given SelectionDAG allows
/// target-specific nodes to be printed in human-readable form.
/// Unlike dumpr, this will print the whole DAG, including children
/// that appear multiple times.
void dumprFull(const SelectionDAG *G = nullptr) const;
/// printrWithDepth to dbgs(). The given
/// SelectionDAG allows target-specific nodes to be printed in
/// human-readable form. Unlike dumpr, this will print children
/// that appear multiple times wherever they are used.
///
void dumprWithDepth(const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
/// Gather unique data for the node.
void Profile(FoldingSetNodeID &ID) const;
/// This method should only be used by the SDUse class.
void addUse(SDUse &U) { U.addToList(&UseList); }
protected:
static SDVTList getSDVTList(EVT VT) {
SDVTList Ret = { getValueTypeList(VT), 1 };
return Ret;
}
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
ArrayRef<SDValue> Ops)
: NodeType(Opc), OperandsNeedDelete(true), HasDebugValue(false),
SubclassData(0), NodeId(-1),
OperandList(Ops.size() ? new SDUse[Ops.size()] : nullptr),
ValueList(VTs.VTs), UseList(nullptr), NumOperands(Ops.size()),
NumValues(VTs.NumVTs), IROrder(Order), debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
assert(NumOperands == Ops.size() &&
"NumOperands wasn't wide enough for its operands!");
assert(NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!");
for (unsigned i = 0; i != Ops.size(); ++i) {
assert(OperandList && "no operands available");
OperandList[i].setUser(this);
OperandList[i].setInitial(Ops[i]);
}
checkForCycles(this);
}
/// This constructor adds no operands itself; operands can be
/// set later with InitOperands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
: NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false),
SubclassData(0), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs),
UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs),
IROrder(Order), debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
assert(NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!");
}
/// Initialize the operands list of this with 1 operand.
void InitOperands(SDUse *Ops, const SDValue &Op0) {
Ops[0].setUser(this);
Ops[0].setInitial(Op0);
NumOperands = 1;
OperandList = Ops;
checkForCycles(this);
}
/// Initialize the operands list of this with 2 operands.
void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1) {
Ops[0].setUser(this);
Ops[0].setInitial(Op0);
Ops[1].setUser(this);
Ops[1].setInitial(Op1);
NumOperands = 2;
OperandList = Ops;
checkForCycles(this);
}
/// Initialize the operands list of this with 3 operands.
void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
const SDValue &Op2) {
Ops[0].setUser(this);
Ops[0].setInitial(Op0);
Ops[1].setUser(this);
Ops[1].setInitial(Op1);
Ops[2].setUser(this);
Ops[2].setInitial(Op2);
NumOperands = 3;
OperandList = Ops;
checkForCycles(this);
}
/// Initialize the operands list of this with 4 operands.
void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
const SDValue &Op2, const SDValue &Op3) {
Ops[0].setUser(this);
Ops[0].setInitial(Op0);
Ops[1].setUser(this);
Ops[1].setInitial(Op1);
Ops[2].setUser(this);
Ops[2].setInitial(Op2);
Ops[3].setUser(this);
Ops[3].setInitial(Op3);
NumOperands = 4;
OperandList = Ops;
checkForCycles(this);
}
/// Initialize the operands list of this with N operands.
void InitOperands(SDUse *Ops, const SDValue *Vals, unsigned N) {
for (unsigned i = 0; i != N; ++i) {
Ops[i].setUser(this);
Ops[i].setInitial(Vals[i]);
}
NumOperands = N;
assert(NumOperands == N &&
"NumOperands wasn't wide enough for its operands!");
OperandList = Ops;
checkForCycles(this);
}
/// Release the operands and set this node to have zero operands.
void DropOperands();
};
/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
/// into SDNode creation functions.
/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
/// from the original Instruction, and IROrder is the ordinal position of
/// the instruction.
/// When an SDNode is created after the DAG is being built, both DebugLoc and
/// the IROrder are propagated from the original SDNode.
/// The SDLoc class therefore provides two constructors besides the default
/// one: one to be used by the DAGBuilder and one for everyone else.
class SDLoc {
private:
// Ptr could be used for either Instruction* or SDNode*. It is used for
// Instruction* if IROrder is not -1.
const void *Ptr;
int IROrder;
public:
SDLoc() : Ptr(nullptr), IROrder(0) {}
SDLoc(const SDNode *N) : Ptr(N), IROrder(-1) {
assert(N && "null SDNode");
}
SDLoc(const SDValue V) : Ptr(V.getNode()), IROrder(-1) {
assert(Ptr && "null SDNode");
}
SDLoc(const Instruction *I, int Order) : Ptr(I), IROrder(Order) {
assert(Order >= 0 && "bad IROrder");
}
unsigned getIROrder() {
if (IROrder >= 0 || Ptr == nullptr) {
return (unsigned)IROrder;
}
const SDNode *N = (const SDNode*)(Ptr);
return N->getIROrder();
}
DebugLoc getDebugLoc() {
if (!Ptr) {
return DebugLoc();
}
if (IROrder >= 0) {
const Instruction *I = (const Instruction*)(Ptr);
return I->getDebugLoc();
}
const SDNode *N = (const SDNode*)(Ptr);
return N->getDebugLoc();
}
};
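// A minimal sketch of the two non-default constructors (I, Order, and N are
// hypothetical):
//
//   SDLoc FromIR(I, Order); // DAGBuilder: takes I->getDebugLoc(), keeps Order
//   SDLoc FromNode(N);      // everyone else: propagates N's DebugLoc/IROrder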
// Define inline functions from the SDValue class.
inline SDValue::SDValue(SDNode *node, unsigned resno)
: Node(node), ResNo(resno) {
assert((!Node || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!");
assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
}
inline unsigned SDValue::getOpcode() const {
return Node->getOpcode();
}
inline EVT SDValue::getValueType() const {
return Node->getValueType(ResNo);
}
inline unsigned SDValue::getNumOperands() const {
return Node->getNumOperands();
}
inline const SDValue &SDValue::getOperand(unsigned i) const {
return Node->getOperand(i);
}
inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
return Node->getConstantOperandVal(i);
}
inline bool SDValue::isTargetOpcode() const {
return Node->isTargetOpcode();
}
inline bool SDValue::isTargetMemoryOpcode() const {
return Node->isTargetMemoryOpcode();
}
inline bool SDValue::isMachineOpcode() const {
return Node->isMachineOpcode();
}
inline unsigned SDValue::getMachineOpcode() const {
return Node->getMachineOpcode();
}
inline bool SDValue::use_empty() const {
return !Node->hasAnyUseOfValue(ResNo);
}
inline bool SDValue::hasOneUse() const {
return Node->hasNUsesOfValue(1, ResNo);
}
inline const DebugLoc &SDValue::getDebugLoc() const {
return Node->getDebugLoc();
}
inline void SDValue::dump() const {
return Node->dump();
}
inline void SDValue::dumpr() const {
return Node->dumpr();
}
// Define inline functions from the SDUse class.
inline void SDUse::set(const SDValue &V) {
if (Val.getNode()) removeFromList();
Val = V;
if (V.getNode()) V.getNode()->addUse(*this);
}
inline void SDUse::setInitial(const SDValue &V) {
Val = V;
V.getNode()->addUse(*this);
}
inline void SDUse::setNode(SDNode *N) {
if (Val.getNode()) removeFromList();
Val.setNode(N);
if (N) N->addUse(*this);
}
/// These are IR-level optimization flags that may be propagated to SDNodes.
/// TODO: This data structure should be shared by the IR optimizer and the
/// backend.
struct SDNodeFlags {
private:
bool NoUnsignedWrap : 1;
bool NoSignedWrap : 1;
bool Exact : 1;
bool UnsafeAlgebra : 1;
bool NoNaNs : 1;
bool NoInfs : 1;
bool NoSignedZeros : 1;
bool AllowReciprocal : 1;
public:
/// Default constructor turns off all optimization flags.
SDNodeFlags() {
NoUnsignedWrap = false;
NoSignedWrap = false;
Exact = false;
UnsafeAlgebra = false;
NoNaNs = false;
NoInfs = false;
NoSignedZeros = false;
AllowReciprocal = false;
}
// These are mutators for each flag.
void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
void setNoSignedWrap(bool b) { NoSignedWrap = b; }
void setExact(bool b) { Exact = b; }
void setUnsafeAlgebra(bool b) { UnsafeAlgebra = b; }
void setNoNaNs(bool b) { NoNaNs = b; }
void setNoInfs(bool b) { NoInfs = b; }
void setNoSignedZeros(bool b) { NoSignedZeros = b; }
void setAllowReciprocal(bool b) { AllowReciprocal = b; }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
bool hasNoSignedWrap() const { return NoSignedWrap; }
bool hasExact() const { return Exact; }
bool hasUnsafeAlgebra() const { return UnsafeAlgebra; }
bool hasNoNaNs() const { return NoNaNs; }
bool hasNoInfs() const { return NoInfs; }
bool hasNoSignedZeros() const { return NoSignedZeros; }
bool hasAllowReciprocal() const { return AllowReciprocal; }
/// Return a raw encoding of the flags.
/// This function should only be used to add data to the NodeID value.
unsigned getRawFlags() const {
return (NoUnsignedWrap << 0) | (NoSignedWrap << 1) | (Exact << 2) |
(UnsafeAlgebra << 3) | (NoNaNs << 4) | (NoInfs << 5) |
(NoSignedZeros << 6) | (AllowReciprocal << 7);
}
};
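// A minimal worked example of the encoding above (NoSignedWrap is bit 1,
// Exact is bit 2):
//
//   SDNodeFlags Flags;
//   Flags.setNoSignedWrap(true);
//   Flags.setExact(true);
//   assert(Flags.getRawFlags() == 0x6); // (1 << 1) | (1 << 2)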
/// This class is used for single-operand SDNodes. This is solely
/// to allow co-allocation of node operands with the node itself.
class UnarySDNode : public SDNode {
SDUse Op;
public:
UnarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
SDValue X)
: SDNode(Opc, Order, dl, VTs) {
InitOperands(&Op, X);
}
};
/// This class is used for two-operand SDNodes. This is solely
/// to allow co-allocation of node operands with the node itself.
class BinarySDNode : public SDNode {
SDUse Ops[2];
public:
BinarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
SDValue X, SDValue Y)
: SDNode(Opc, Order, dl, VTs) {
InitOperands(Ops, X, Y);
}
};
/// Returns true if the opcode is a binary operation with flags.
static bool isBinOpWithFlags(unsigned Opcode) {
switch (Opcode) {
case ISD::SDIV:
case ISD::UDIV:
case ISD::SRA:
case ISD::SRL:
case ISD::MUL:
case ISD::ADD:
case ISD::SUB:
case ISD::SHL:
case ISD::FADD:
case ISD::FDIV:
case ISD::FMUL:
case ISD::FREM:
case ISD::FSUB:
return true;
default:
return false;
}
}
/// This class is an extension of BinarySDNode
/// used from those opcodes that have associated extra flags.
class BinaryWithFlagsSDNode : public BinarySDNode {
public:
SDNodeFlags Flags;
BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
SDValue X, SDValue Y, const SDNodeFlags &NodeFlags)
: BinarySDNode(Opc, Order, dl, VTs, X, Y), Flags(NodeFlags) {}
static bool classof(const SDNode *N) {
return isBinOpWithFlags(N->getOpcode());
}
};
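// Because classof keys off the opcode, dyn_cast works on any flag-bearing
// binary node. A minimal sketch (assumes a hypothetical SDNode *N):
//
//   if (const auto *BN = dyn_cast<BinaryWithFlagsSDNode>(N))
//     if (BN->Flags.hasNoUnsignedWrap())
//       ...; // e.g. safe to drop an overflow check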
/// This class is used for three-operand SDNodes. This is solely
/// to allow co-allocation of node operands with the node itself.
class TernarySDNode : public SDNode {
SDUse Ops[3];
public:
TernarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
SDValue X, SDValue Y, SDValue Z)
: SDNode(Opc, Order, dl, VTs) {
InitOperands(Ops, X, Y, Z);
}
};
/// This class is used to form a handle around another node that
/// is persistent and is updated across invocations of replaceAllUsesWith on its
/// operand. This node should be directly created by end-users and not added to
/// the AllNodes list.
class HandleSDNode : public SDNode {
SDUse Op;
public:
explicit HandleSDNode(SDValue X)
: SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
InitOperands(&Op, X);
}
~HandleSDNode();
const SDValue &getValue() const { return Op; }
};
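// A minimal sketch of the usual idiom (assumes a hypothetical SDValue V):
//
//   HandleSDNode Handle(V);
//   ...; // transformations that may ReplaceAllUsesWith V's node
//   SDValue Current = Handle.getValue(); // follows any replacement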
class AddrSpaceCastSDNode : public UnarySDNode {
private:
unsigned SrcAddrSpace;
unsigned DestAddrSpace;
public:
AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT, SDValue X,
unsigned SrcAS, unsigned DestAS);
unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
unsigned getDestAddressSpace() const { return DestAddrSpace; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ADDRSPACECAST;
}
};
/// This is an abstract virtual class for memory operations.
class MemSDNode : public SDNode {
private:
// VT of in-memory value.
EVT MemoryVT;
protected:
/// Memory reference information.
MachineMemOperand *MMO;
public:
MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
EVT MemoryVT, MachineMemOperand *MMO);
MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
ArrayRef<SDValue> Ops, EVT MemoryVT, MachineMemOperand *MMO);
bool readMem() const { return MMO->isLoad(); }
bool writeMem() const { return MMO->isStore(); }
  /// Returns the alignment of the memory access.
unsigned getOriginalAlignment() const {
return MMO->getBaseAlignment();
}
unsigned getAlignment() const {
return MMO->getAlignment();
}
/// Return the SubclassData value, which contains an
/// encoding of the volatile flag, as well as bits used by subclasses. This
/// function should only be used to compute a FoldingSetNodeID value.
unsigned getRawSubclassData() const {
return SubclassData;
}
// We access subclass data here so that we can check consistency
// with MachineMemOperand information.
bool isVolatile() const { return (SubclassData >> 5) & 1; }
bool isNonTemporal() const { return (SubclassData >> 6) & 1; }
bool isInvariant() const { return (SubclassData >> 7) & 1; }
AtomicOrdering getOrdering() const {
return AtomicOrdering((SubclassData >> 8) & 15);
}
SynchronizationScope getSynchScope() const {
return SynchronizationScope((SubclassData >> 12) & 1);
}
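  // Summary of the SubclassData bits used here and by subclasses (this must
  // match encodeMemSDNodeFlags() in SelectionDAG.cpp):
  //   bit 5     : volatile
  //   bit 6     : non-temporal
  //   bit 7     : invariant
  //   bits 8-11 : atomic (success) ordering
  //   bit 12    : synchronization scope
  //   bit 13    : mem-intrinsic marker (see isMemIntrinsic)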
// Returns the offset from the location of the access.
int64_t getSrcValueOffset() const { return MMO->getOffset(); }
/// Returns the AA info that describes the dereference.
AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
/// Returns the Ranges that describes the dereference.
const MDNode *getRanges() const { return MMO->getRanges(); }
/// Return the type of the in-memory value.
EVT getMemoryVT() const { return MemoryVT; }
/// Return a MachineMemOperand object describing the memory
/// reference performed by operation.
MachineMemOperand *getMemOperand() const { return MMO; }
const MachinePointerInfo &getPointerInfo() const {
return MMO->getPointerInfo();
}
/// Return the address space for the associated pointer
unsigned getAddressSpace() const {
return getPointerInfo().getAddrSpace();
}
/// Update this MemSDNode's MachineMemOperand information
/// to reflect the alignment of NewMMO, if it has a greater alignment.
/// This must only be used when the new alignment applies to all users of
/// this MachineMemOperand.
void refineAlignment(const MachineMemOperand *NewMMO) {
MMO->refineAlignment(NewMMO);
}
const SDValue &getChain() const { return getOperand(0); }
const SDValue &getBasePtr() const {
return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
}
// Methods to support isa and dyn_cast
static bool classof(const SDNode *N) {
// For some targets, we lower some target intrinsics to a MemIntrinsicNode
// with either an intrinsic or a target opcode.
return N->getOpcode() == ISD::LOAD ||
N->getOpcode() == ISD::STORE ||
N->getOpcode() == ISD::PREFETCH ||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
N->getOpcode() == ISD::ATOMIC_SWAP ||
N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE ||
N->getOpcode() == ISD::MLOAD ||
N->getOpcode() == ISD::MSTORE ||
N->getOpcode() == ISD::MGATHER ||
N->getOpcode() == ISD::MSCATTER ||
N->isMemIntrinsic() ||
N->isTargetMemoryOpcode();
}
};
/// This is an SDNode representing atomic operations.
class AtomicSDNode : public MemSDNode {
SDUse Ops[4];
/// For cmpxchg instructions, the ordering requirements when a store does not
/// occur.
AtomicOrdering FailureOrdering;
void InitAtomic(AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
// This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
assert((SuccessOrdering & 15) == SuccessOrdering &&
"Ordering may not require more than 4 bits!");
assert((FailureOrdering & 15) == FailureOrdering &&
"Ordering may not require more than 4 bits!");
assert((SynchScope & 1) == SynchScope &&
"SynchScope may not require more than 1 bit!");
SubclassData |= SuccessOrdering << 8;
SubclassData |= SynchScope << 12;
this->FailureOrdering = FailureOrdering;
assert(getSuccessOrdering() == SuccessOrdering &&
"Ordering encoding error!");
assert(getFailureOrdering() == FailureOrdering &&
"Ordering encoding error!");
assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
}
public:
// Opc: opcode for atomic
// VTL: value type list
  // Chain: memory chain for operand
// Ptr: address to update as a SDValue
// Cmp: compare value
// Swp: swap value
// SrcVal: address to update as a Value (used for MemOperand)
// Align: alignment of memory
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO, AtomicOrdering Ordering,
SynchronizationScope SynchScope)
: MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
InitAtomic(Ordering, Ordering, SynchScope);
InitOperands(Ops, Chain, Ptr, Cmp, Swp);
}
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
EVT MemVT,
SDValue Chain, SDValue Ptr,
SDValue Val, MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope)
: MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
InitAtomic(Ordering, Ordering, SynchScope);
InitOperands(Ops, Chain, Ptr, Val);
}
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
EVT MemVT,
SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope)
: MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
InitAtomic(Ordering, Ordering, SynchScope);
InitOperands(Ops, Chain, Ptr);
}
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL, EVT MemVT,
const SDValue* AllOps, SDUse *DynOps, unsigned NumOps,
MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope)
: MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
InitAtomic(SuccessOrdering, FailureOrdering, SynchScope);
assert((DynOps || NumOps <= array_lengthof(Ops)) &&
"Too many ops for internal storage!");
InitOperands(DynOps ? DynOps : Ops, AllOps, NumOps);
}
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getVal() const { return getOperand(2); }
AtomicOrdering getSuccessOrdering() const {
return getOrdering();
}
// Not quite enough room in SubclassData for everything, so failure gets its
// own field.
AtomicOrdering getFailureOrdering() const {
return FailureOrdering;
}
bool isCompareAndSwap() const {
unsigned Op = getOpcode();
return Op == ISD::ATOMIC_CMP_SWAP || Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
}
// Methods to support isa and dyn_cast
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
N->getOpcode() == ISD::ATOMIC_SWAP ||
N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
};
/// This SDNode is used for target intrinsics that touch
/// memory and need an associated MachineMemOperand. Its opcode may be
/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
MemIntrinsicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
ArrayRef<SDValue> Ops, EVT MemoryVT,
MachineMemOperand *MMO)
: MemSDNode(Opc, Order, dl, VTs, Ops, MemoryVT, MMO) {
SubclassData |= 1u << 13;
}
// Methods to support isa and dyn_cast
static bool classof(const SDNode *N) {
    // We lower some target intrinsics to their target opcode early, so a
    // node with a target opcode can also be of this class.
return N->isMemIntrinsic() ||
N->getOpcode() == ISD::PREFETCH ||
N->isTargetMemoryOpcode();
}
};
/// This SDNode is used to implement the code generator
/// support for the llvm IR shufflevector instruction. It combines elements
/// from two input vectors into a new input vector, with the selection and
/// ordering of elements determined by an array of integers, referred to as
/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
/// An index of -1 is treated as undef, such that the code generator may put
/// any value in the corresponding element of the result.
class ShuffleVectorSDNode : public SDNode {
SDUse Ops[2];
// The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
// is freed when the SelectionDAG object is destroyed.
const int *Mask;
protected:
friend class SelectionDAG;
ShuffleVectorSDNode(EVT VT, unsigned Order, DebugLoc dl, SDValue N1,
SDValue N2, const int *M)
: SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {
InitOperands(Ops, N1, N2);
}
public:
ArrayRef<int> getMask() const {
EVT VT = getValueType(0);
return makeArrayRef(Mask, VT.getVectorNumElements());
}
int getMaskElt(unsigned Idx) const {
assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
return Mask[Idx];
}
bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
int getSplatIndex() const {
assert(isSplat() && "Cannot get splat index for non-splat!");
EVT VT = getValueType(0);
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
if (Mask[i] >= 0)
return Mask[i];
}
llvm_unreachable("Splat with all undef indices?");
}
static bool isSplatMask(const int *Mask, EVT VT);
/// Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static void commuteMask(SmallVectorImpl<int> &Mask) {
unsigned NumElems = Mask.size();
for (unsigned i = 0; i != NumElems; ++i) {
int idx = Mask[i];
if (idx < 0)
continue;
else if (idx < (int)NumElems)
Mask[i] = idx + NumElems;
else
Mask[i] = idx - NumElems;
}
}
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::VECTOR_SHUFFLE;
}
};
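// A minimal worked example: for 4-element inputs, the mask {0, 4, 1, 5}
// interleaves the low halves of LHS and RHS, and commuteMask() rewrites it
// to {4, 0, 5, 1} (the same shuffle with the operands swapped). A mask of
// {0, 0, 0, 0} is a splat of LHS element 0.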
class ConstantSDNode : public SDNode {
const ConstantInt *Value;
friend class SelectionDAG;
ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
DebugLoc DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstant : ISD::Constant,
0, DL, getSDVTList(VT)), Value(val) {
SubclassData |= (uint16_t)isOpaque;
}
public:
const ConstantInt *getConstantIntValue() const { return Value; }
const APInt &getAPIntValue() const { return Value->getValue(); }
uint64_t getZExtValue() const { return Value->getZExtValue(); }
int64_t getSExtValue() const { return Value->getSExtValue(); }
bool isOne() const { return Value->isOne(); }
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
bool isOpaque() const { return SubclassData & 1; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::Constant ||
N->getOpcode() == ISD::TargetConstant;
}
};
class ConstantFPSDNode : public SDNode {
const ConstantFP *Value;
friend class SelectionDAG;
ConstantFPSDNode(bool isTarget, const ConstantFP *val, DebugLoc DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP,
0, DL, getSDVTList(VT)), Value(val) {
}
public:
const APFloat& getValueAPF() const { return Value->getValueAPF(); }
const ConstantFP *getConstantFPValue() const { return Value; }
/// Return true if the value is positive or negative zero.
bool isZero() const { return Value->isZero(); }
/// Return true if the value is a NaN.
bool isNaN() const { return Value->isNaN(); }
  /// Return true if the value is an infinity.
bool isInfinity() const { return Value->isInfinity(); }
/// Return true if the value is negative.
bool isNegative() const { return Value->isNegative(); }
/// We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
/// We leave the version with the double argument here because it's just so
/// convenient to write "2.0" and the like. Without this function we'd
/// have to duplicate its logic everywhere it's called.
bool isExactlyValue(double V) const {
bool ignored;
APFloat Tmp(V);
Tmp.convert(Value->getValueAPF().getSemantics(),
APFloat::rmNearestTiesToEven, &ignored);
return isExactlyValue(Tmp);
}
bool isExactlyValue(const APFloat& V) const;
static bool isValueValidForType(EVT VT, const APFloat& Val);
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ConstantFP ||
N->getOpcode() == ISD::TargetConstantFP;
}
};
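// A minimal sketch of bit-exact matching (assumes a hypothetical SDNode *N):
//
//   if (const auto *CFP = dyn_cast<ConstantFPSDNode>(N))
//     if (CFP->isExactlyValue(1.0))
//       ...; // matches 1.0 exactly; +0.0 and -0.0 stay distinguishable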
class GlobalAddressSDNode : public SDNode {
const GlobalValue *TheGlobal;
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
GlobalAddressSDNode(unsigned Opc, unsigned Order, DebugLoc DL,
const GlobalValue *GA, EVT VT, int64_t o,
unsigned char TargetFlags);
public:
const GlobalValue *getGlobal() const { return TheGlobal; }
int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
// Return the address space this GlobalAddress belongs to.
unsigned getAddressSpace() const;
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::GlobalAddress ||
N->getOpcode() == ISD::TargetGlobalAddress ||
N->getOpcode() == ISD::GlobalTLSAddress ||
N->getOpcode() == ISD::TargetGlobalTLSAddress;
}
};
class FrameIndexSDNode : public SDNode {
int FI;
friend class SelectionDAG;
FrameIndexSDNode(int fi, EVT VT, bool isTarg)
: SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
0, DebugLoc(), getSDVTList(VT)), FI(fi) {
}
public:
int getIndex() const { return FI; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::FrameIndex ||
N->getOpcode() == ISD::TargetFrameIndex;
}
};
class JumpTableSDNode : public SDNode {
int JTI;
unsigned char TargetFlags;
friend class SelectionDAG;
JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
: SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
}
public:
int getIndex() const { return JTI; }
unsigned char getTargetFlags() const { return TargetFlags; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::JumpTable ||
N->getOpcode() == ISD::TargetJumpTable;
}
};
class ConstantPoolSDNode : public SDNode {
union {
const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
int Offset; // It's a MachineConstantPoolValue if top bit is set.
unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
unsigned char TargetFlags;
friend class SelectionDAG;
ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
TargetFlags(TF) {
assert(Offset >= 0 && "Offset is too large");
Val.ConstVal = c;
}
ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
EVT VT, int o, unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
TargetFlags(TF) {
assert(Offset >= 0 && "Offset is too large");
Val.MachineCPVal = v;
Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
}
public:
bool isMachineConstantPoolEntry() const {
return Offset < 0;
}
const Constant *getConstVal() const {
assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
return Val.ConstVal;
}
MachineConstantPoolValue *getMachineCPVal() const {
assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
return Val.MachineCPVal;
}
int getOffset() const {
return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
}
// Return the alignment of this constant pool object, which is either 0 (for
// default alignment) or the desired value.
unsigned getAlignment() const { return Alignment; }
unsigned char getTargetFlags() const { return TargetFlags; }
Type *getType() const;
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ConstantPool ||
N->getOpcode() == ISD::TargetConstantPool;
}
};
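// Note on the encoding above: the top bit of Offset doubles as a tag. The
// MachineConstantPoolValue constructor ORs it in, so
// isMachineConstantPoolEntry() is simply Offset < 0, and getOffset() masks
// the tag back off before returning the real offset.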
/// Completely target-dependent object reference.
class TargetIndexSDNode : public SDNode {
unsigned char TargetFlags;
int Index;
int64_t Offset;
friend class SelectionDAG;
public:
TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
: SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
TargetFlags(TF), Index(Idx), Offset(Ofs) {}
unsigned char getTargetFlags() const { return TargetFlags; }
int getIndex() const { return Index; }
int64_t getOffset() const { return Offset; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::TargetIndex;
}
};
class BasicBlockSDNode : public SDNode {
MachineBasicBlock *MBB;
friend class SelectionDAG;
/// Debug info is meaningful and potentially useful here, but we create
/// blocks out of order when they're jumped to, which makes it a bit
/// harder. Let's see if we need it first.
explicit BasicBlockSDNode(MachineBasicBlock *mbb)
: SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
{}
public:
MachineBasicBlock *getBasicBlock() const { return MBB; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BasicBlock;
}
};
/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
class BuildVectorSDNode : public SDNode {
// These are constructed as SDNodes and then cast to BuildVectorSDNodes.
explicit BuildVectorSDNode() = delete;
public:
/// Check if this is a constant splat, and if so, find the
/// smallest element size that splats the vector. If MinSplatBits is
/// nonzero, the element size must be at least that large. Note that the
/// splat element may be the entire vector (i.e., a one element vector).
/// Returns the splat element value in SplatValue. Any undefined bits in
/// that value are zero, and the corresponding bits in the SplatUndef mask
/// are set. The SplatBitSize value is set to the splat element size in
/// bits. HasAnyUndefs is set to true if any bits in the vector are
/// undefined. isBigEndian describes the endianness of the target.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
unsigned &SplatBitSize, bool &HasAnyUndefs,
unsigned MinSplatBits = 0,
bool isBigEndian = false) const;
/// \brief Returns the splatted value or a null value if this is not a splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
/// the vector width and set the bits where elements are undef.
SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
/// \brief Returns the splatted constant or null if this is not a constant
/// splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
/// the vector width and set the bits where elements are undef.
ConstantSDNode *
getConstantSplatNode(BitVector *UndefElements = nullptr) const;
/// \brief Returns the splatted constant FP or null if this is not a constant
/// FP splat.
///
/// If passed a non-null UndefElements bitvector, it will resize it to match
/// the vector width and set the bits where elements are undef.
ConstantFPSDNode *
getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
bool isConstant() const;
static inline bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BUILD_VECTOR;
}
};
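// A hedged usage sketch: querying a BUILD_VECTOR for a constant splat inside
// a DAG combine. N and BitWidth are assumed to come from the surrounding
// combine; this is illustrative, not part of the header.
//
//   if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
//     APInt SplatValue, SplatUndef;
//     unsigned SplatBitSize;
//     bool HasAnyUndefs;
//     if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                             HasAnyUndefs) &&
//         SplatBitSize <= BitWidth) {
//       // Every defined element repeats the same SplatBitSize-bit pattern.
//     }
//   }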
/// An SDNode that holds an arbitrary LLVM IR Value. This is
/// used when the SelectionDAG needs to make a simple reference to something
/// in the LLVM IR representation.
///
class SrcValueSDNode : public SDNode {
const Value *V;
friend class SelectionDAG;
/// Create a SrcValue for a general value.
explicit SrcValueSDNode(const Value *v)
: SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
public:
/// Return the contained Value.
const Value *getValue() const { return V; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::SRCVALUE;
}
};
class MDNodeSDNode : public SDNode {
const MDNode *MD;
friend class SelectionDAG;
explicit MDNodeSDNode(const MDNode *md)
: SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
{}
public:
const MDNode *getMD() const { return MD; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MDNODE_SDNODE;
}
};
class RegisterSDNode : public SDNode {
unsigned Reg;
friend class SelectionDAG;
RegisterSDNode(unsigned reg, EVT VT)
: SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {
}
public:
unsigned getReg() const { return Reg; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::Register;
}
};
class RegisterMaskSDNode : public SDNode {
// The memory for RegMask is not owned by the node.
const uint32_t *RegMask;
friend class SelectionDAG;
RegisterMaskSDNode(const uint32_t *mask)
: SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
RegMask(mask) {}
public:
const uint32_t *getRegMask() const { return RegMask; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::RegisterMask;
}
};
class BlockAddressSDNode : public SDNode {
const BlockAddress *BA;
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
int64_t o, unsigned char Flags)
: SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
BA(ba), Offset(o), TargetFlags(Flags) {
}
public:
const BlockAddress *getBlockAddress() const { return BA; }
int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BlockAddress ||
N->getOpcode() == ISD::TargetBlockAddress;
}
};
class EHLabelSDNode : public SDNode {
SDUse Chain;
MCSymbol *Label;
friend class SelectionDAG;
EHLabelSDNode(unsigned Order, DebugLoc dl, SDValue ch, MCSymbol *L)
: SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {
InitOperands(&Chain, ch);
}
public:
MCSymbol *getLabel() const { return Label; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::EH_LABEL;
}
};
class ExternalSymbolSDNode : public SDNode {
const char *Symbol;
unsigned char TargetFlags;
friend class SelectionDAG;
ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
: SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {
}
public:
const char *getSymbol() const { return Symbol; }
unsigned char getTargetFlags() const { return TargetFlags; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ExternalSymbol ||
N->getOpcode() == ISD::TargetExternalSymbol;
}
};
class MCSymbolSDNode : public SDNode {
MCSymbol *Symbol;
friend class SelectionDAG;
MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
: SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
public:
MCSymbol *getMCSymbol() const { return Symbol; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MCSymbol;
}
};
class CondCodeSDNode : public SDNode {
ISD::CondCode Condition;
friend class SelectionDAG;
explicit CondCodeSDNode(ISD::CondCode Cond)
: SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
Condition(Cond) {
}
public:
ISD::CondCode get() const { return Condition; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CONDCODE;
}
};
/// NOTE: avoid using this node as this may disappear in the
/// future and most targets don't support it.
class CvtRndSatSDNode : public SDNode {
ISD::CvtCode CvtCode;
friend class SelectionDAG;
explicit CvtRndSatSDNode(EVT VT, unsigned Order, DebugLoc dl,
ArrayRef<SDValue> Ops, ISD::CvtCode Code)
: SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT), Ops),
CvtCode(Code) {
assert(Ops.size() == 5 && "wrong number of operands");
}
public:
ISD::CvtCode getCvtCode() const { return CvtCode; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CONVERT_RNDSAT;
}
};
/// This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
EVT ValueType;
friend class SelectionDAG;
explicit VTSDNode(EVT VT)
: SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
ValueType(VT) {
}
public:
EVT getVT() const { return ValueType; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::VALUETYPE;
}
};
/// Base class for LoadSDNode and StoreSDNode
class LSBaseSDNode : public MemSDNode {
//! Operand array for load and store
/*!
\note Moving this array to the base class captures more
common functionality shared between LoadSDNode and
StoreSDNode
*/
SDUse Ops[4];
public:
LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
SDValue *Operands, unsigned numOperands,
SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
SubclassData |= AM << 2;
assert(getAddressingMode() == AM && "MemIndexedMode encoding error!");
InitOperands(Ops, Operands, numOperands);
assert((getOffset().getOpcode() == ISD::UNDEF || isIndexed()) &&
"Only indexed loads and stores have a non-undef offset operand");
}
const SDValue &getOffset() const {
return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
}
/// Return the addressing mode for this load or store:
/// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
ISD::MemIndexedMode getAddressingMode() const {
return ISD::MemIndexedMode((SubclassData >> 2) & 7);
}
/// Return true if this is a pre/post inc/dec load/store.
bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
/// Return true if this is NOT a pre/post inc/dec load/store.
bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::LOAD ||
N->getOpcode() == ISD::STORE;
}
};
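// Illustrative note on the SubclassData layout implied by the accessors
// above (an assumption drawn from this header, not a separate API): bits 0-1
// hold the LoadSDNode extension type or the StoreSDNode truncating flag, and
// bits 2-4 hold the ISD::MemIndexedMode.
//
//   unsigned short SubclassData = 0;
//   SubclassData |= ISD::PRE_INC << 2;             // addressing mode
//   SubclassData |= (unsigned short)ISD::SEXTLOAD; // extension type
//   assert(ISD::MemIndexedMode((SubclassData >> 2) & 7) == ISD::PRE_INC);
//   assert(ISD::LoadExtType(SubclassData & 3) == ISD::SEXTLOAD);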
/// This class is used to represent ISD::LOAD nodes.
class LoadSDNode : public LSBaseSDNode {
friend class SelectionDAG;
LoadSDNode(SDValue *ChainPtrOff, unsigned Order, DebugLoc dl, SDVTList VTs,
ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::LOAD, Order, dl, ChainPtrOff, 3, VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)ETy;
assert(getExtensionType() == ETy && "LoadExtType encoding error!");
assert(readMem() && "Load MachineMemOperand is not a load!");
assert(!writeMem() && "Load MachineMemOperand is a store!");
}
public:
/// Return whether this is a plain node,
/// or one of the varieties of value-extending loads.
ISD::LoadExtType getExtensionType() const {
return ISD::LoadExtType(SubclassData & 3);
}
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getOffset() const { return getOperand(2); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::LOAD;
}
};
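// A hedged usage sketch: inspecting a load during lowering or selection.
// N is an assumed SDNode* from the surrounding code.
//
//   if (auto *LD = dyn_cast<LoadSDNode>(N)) {
//     SDValue Chain = LD->getChain();
//     SDValue Base  = LD->getBasePtr();
//     if (LD->getExtensionType() == ISD::SEXTLOAD && LD->isUnindexed()) {
//       // Candidate for folding the sign-extension into the load itself.
//     }
//   }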
/// This class is used to represent ISD::STORE nodes.
class StoreSDNode : public LSBaseSDNode {
friend class SelectionDAG;
StoreSDNode(SDValue *ChainValuePtrOff, unsigned Order, DebugLoc dl,
SDVTList VTs, ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::STORE, Order, dl, ChainValuePtrOff, 4,
VTs, AM, MemVT, MMO) {
SubclassData |= (unsigned short)isTrunc;
assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
assert(!readMem() && "Store MachineMemOperand is a load!");
assert(writeMem() && "Store MachineMemOperand is not a store!");
}
public:
/// Return true if the op does a truncation before store.
/// For integers this is the same as doing a TRUNCATE and storing the result.
/// For floats, it is the same as doing an FP_ROUND and storing the result.
bool isTruncatingStore() const { return SubclassData & 1; }
const SDValue &getValue() const { return getOperand(1); }
const SDValue &getBasePtr() const { return getOperand(2); }
const SDValue &getOffset() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::STORE;
}
};
/// This base class is used to represent MLOAD and MSTORE nodes
class MaskedLoadStoreSDNode : public MemSDNode {
// Operands
SDUse Ops[4];
public:
friend class SelectionDAG;
MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
SDValue *Operands, unsigned numOperands,
SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
InitOperands(Ops, Operands, numOperands);
}
// In both nodes the address is operand 1 and the mask is operand 2:
// MaskedLoadSDNode (Chain, ptr, mask, src0), src0 is a passthru value
// MaskedStoreSDNode (Chain, ptr, mask, data)
// The mask is a vector of i1 elements.
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getMask() const { return getOperand(2); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD ||
N->getOpcode() == ISD::MSTORE;
}
};
/// This class is used to represent an MLOAD node
class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
public:
friend class SelectionDAG;
MaskedLoadSDNode(unsigned Order, DebugLoc dl, SDValue *Operands,
unsigned numOperands, SDVTList VTs, ISD::LoadExtType ETy,
EVT MemVT, MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, Operands, numOperands,
VTs, MemVT, MMO) {
SubclassData |= (unsigned short)ETy;
}
ISD::LoadExtType getExtensionType() const {
return ISD::LoadExtType(SubclassData & 3);
}
const SDValue &getSrc0() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD;
}
};
/// This class is used to represent an MSTORE node
class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
public:
friend class SelectionDAG;
MaskedStoreSDNode(unsigned Order, DebugLoc dl, SDValue *Operands,
unsigned numOperands, SDVTList VTs, bool isTrunc, EVT MemVT,
MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, Operands, numOperands,
VTs, MemVT, MMO) {
SubclassData |= (unsigned short)isTrunc;
}
/// Return true if the op does a truncation before store.
/// For integers this is the same as doing a TRUNCATE and storing the result.
/// For floats, it is the same as doing an FP_ROUND and storing the result.
bool isTruncatingStore() const { return SubclassData & 1; }
const SDValue &getValue() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSTORE;
}
};
/// This is a base class used to represent
/// MGATHER and MSCATTER nodes
///
class MaskedGatherScatterSDNode : public MemSDNode {
// Operands
SDUse Ops[5];
public:
friend class SelectionDAG;
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
ArrayRef<SDValue> Operands, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
assert(Operands.size() == 5 && "Incompatible number of operands");
InitOperands(Ops, Operands.data(), Operands.size());
}
// In both nodes the mask is operand 2, the base pointer is operand 3, and
// the index is operand 4:
// MaskedGatherSDNode (Chain, src0, mask, base, index), src0 is a passthru value
// MaskedScatterSDNode (Chain, value, mask, base, index)
// The mask is a vector of i1 elements.
const SDValue &getBasePtr() const { return getOperand(3); }
const SDValue &getIndex() const { return getOperand(4); }
const SDValue &getMask() const { return getOperand(2); }
const SDValue &getValue() const { return getOperand(1); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MGATHER ||
N->getOpcode() == ISD::MSCATTER;
}
};
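// A hedged sketch of the operand layout documented above, for an assumed
// MaskedGatherSDNode *GN (illustrative only):
//
//   SDValue Chain    = GN->getOperand(0);
//   SDValue PassThru = GN->getValue();   // operand 1
//   SDValue Mask     = GN->getMask();    // operand 2, a vector of i1
//   SDValue Base     = GN->getBasePtr(); // operand 3
//   SDValue Index    = GN->getIndex();   // operand 4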
/// This class is used to represent an MGATHER node
///
class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
public:
friend class SelectionDAG;
MaskedGatherSDNode(unsigned Order, DebugLoc dl, ArrayRef<SDValue> Operands,
SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, Operands, VTs, MemVT,
MMO) {
assert(getValue().getValueType() == getValueType(0) &&
       "Incompatible type of the PassThru value in MaskedGatherSDNode");
assert(getMask().getValueType().getVectorNumElements() ==
       getValueType(0).getVectorNumElements() &&
       "Vector width mismatch between mask and data");
assert(getMask().getValueType().getScalarType() == MVT::i1 &&
       "Mask must be a vector of i1 elements");
}
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MGATHER;
}
};
/// This class is used to represent an MSCATTER node
///
class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
public:
friend class SelectionDAG;
MaskedScatterSDNode(unsigned Order, DebugLoc dl, ArrayRef<SDValue> Operands,
                    SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
  : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, Operands, VTs, MemVT,
                              MMO) {
  assert(getMask().getValueType().getVectorNumElements() ==
         getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(getMask().getValueType().getScalarType() == MVT::i1 &&
         "Mask must be a vector of i1 elements");
}
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSCATTER;
}
};
/// An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
class MachineSDNode : public SDNode {
public:
typedef MachineMemOperand **mmo_iterator;
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
/// Operands for this instruction, if they fit here. If
/// they don't, this field is unused.
SDUse LocalOperands[4];
/// Memory reference descriptions for this instruction.
mmo_iterator MemRefs;
mmo_iterator MemRefsEnd;
public:
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefsEnd; }
bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
/// Assign this MachineSDNode's memory reference descriptor
/// list. This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
assert(*MMI && "Null mem ref detected!");
MemRefs = NewMemRefs;
MemRefsEnd = NewMemRefsEnd;
}
static bool classof(const SDNode *N) {
return N->isMachineOpcode();
}
};
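// A hedged usage sketch: attaching memory operands to a MachineSDNode during
// selection. CurDAG, MF, Opc, DL, VTs, Ops, and MMO are assumed from the
// surrounding ISel code; allocateMemRefsArray keeps ownership with the
// MachineFunction, matching the "does not transfer ownership" contract of
// setMemRefs above.
//
//   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
//   MemOp[0] = MMO;
//   MachineSDNode *MN = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
//   MN->setMemRefs(MemOp, MemOp + 1);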
class SDNodeIterator {
const SDNode *Node;
unsigned Operand;
SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
public:
using iterator_category = std::forward_iterator_tag;
using value_type = SDNode;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
bool operator==(const SDNodeIterator& x) const {
return Operand == x.Operand;
}
bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
pointer operator*() const {
return Node->getOperand(Operand).getNode();
}
pointer operator->() const { return operator*(); }
SDNodeIterator& operator++() { // Preincrement
++Operand;
return *this;
}
SDNodeIterator operator++(int) { // Postincrement
SDNodeIterator tmp = *this; ++*this; return tmp;
}
size_t operator-(SDNodeIterator Other) const {
assert(Node == Other.Node &&
"Cannot compare iterators of two different nodes!");
return Operand - Other.Operand;
}
static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
static SDNodeIterator end (const SDNode *N) {
return SDNodeIterator(N, N->getNumOperands());
}
unsigned getOperand() const { return Operand; }
const SDNode *getNode() const { return Node; }
};
template <> struct GraphTraits<SDNode*> {
typedef SDNode NodeType;
typedef SDNodeIterator ChildIteratorType;
static inline NodeType *getEntryNode(SDNode *N) { return N; }
static inline ChildIteratorType child_begin(NodeType *N) {
return SDNodeIterator::begin(N);
}
static inline ChildIteratorType child_end(NodeType *N) {
return SDNodeIterator::end(N);
}
};
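// With the GraphTraits specialization above, the generic graph algorithms in
// llvm/ADT apply to the operand graph. A hedged sketch (Root is an assumed
// SDNode*):
//
//   #include "llvm/ADT/DepthFirstIterator.h"
//   for (SDNode *N : depth_first(Root)) {
//     // Visits Root and, transitively, every node it uses as an operand.
//   }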
/// The largest SDNode class.
typedef MaskedGatherScatterSDNode LargestSDNode;
/// The SDNode class with the greatest alignment requirement.
typedef GlobalAddressSDNode MostAlignedSDNode;
namespace ISD {
/// Returns true if the specified node is a non-extending and unindexed load.
inline bool isNormalLoad(const SDNode *N) {
const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
Ld->getAddressingMode() == ISD::UNINDEXED;
}
/// Returns true if the specified node is a non-extending load.
inline bool isNON_EXTLoad(const SDNode *N) {
return isa<LoadSDNode>(N) &&
cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
}
/// Returns true if the specified node is a EXTLOAD.
inline bool isEXTLoad(const SDNode *N) {
return isa<LoadSDNode>(N) &&
cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
}
/// Returns true if the specified node is a SEXTLOAD.
inline bool isSEXTLoad(const SDNode *N) {
return isa<LoadSDNode>(N) &&
cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}
/// Returns true if the specified node is a ZEXTLOAD.
inline bool isZEXTLoad(const SDNode *N) {
return isa<LoadSDNode>(N) &&
cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
}
/// Returns true if the specified node is an unindexed load.
inline bool isUNINDEXEDLoad(const SDNode *N) {
return isa<LoadSDNode>(N) &&
cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
/// Returns true if the specified node is a non-truncating
/// and unindexed store.
inline bool isNormalStore(const SDNode *N) {
const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
return St && !St->isTruncatingStore() &&
St->getAddressingMode() == ISD::UNINDEXED;
}
/// Returns true if the specified node is a non-truncating store.
inline bool isNON_TRUNCStore(const SDNode *N) {
return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
}
/// Returns true if the specified node is a truncating store.
inline bool isTRUNCStore(const SDNode *N) {
return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
}
/// Returns true if the specified node is an unindexed store.
inline bool isUNINDEXEDStore(const SDNode *N) {
return isa<StoreSDNode>(N) &&
cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
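// A hedged sketch of how these predicates are typically used in a DAG
// combine (N is an assumed SDNode*):
//
//   if (ISD::isNormalLoad(N)) {
//     auto *LD = cast<LoadSDNode>(N);
//     // Safe to treat as a plain, unextended, unindexed load of
//     // LD->getMemoryVT() from LD->getBasePtr().
//   }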
}
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/GCMetadataPrinter.h | //===-- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The abstract base class GCMetadataPrinter supports writing GC metadata tables
// as assembly code. This is a separate class from GCStrategy in order to allow
// users of the LLVM JIT to avoid linking with the AsmWriter.
//
// Subclasses of GCMetadataPrinter must be registered using the
// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
// because these subclasses are logically plugins for the AsmWriter.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
#define LLVM_CODEGEN_GCMETADATAPRINTER_H
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/Support/Registry.h"
namespace llvm {
class GCMetadataPrinter;
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
/// GCMetadataPrinter - Emits GC metadata as assembly code. Instances are
/// created, managed, and owned by the AsmPrinter.
class GCMetadataPrinter {
private:
GCStrategy *S;
friend class AsmPrinter;
protected:
// May only be subclassed.
GCMetadataPrinter();
private:
GCMetadataPrinter(const GCMetadataPrinter &) = delete;
GCMetadataPrinter &operator=(const GCMetadataPrinter &) = delete;
public:
GCStrategy &getStrategy() { return *S; }
/// Called before the assembly for the module is generated by
/// the AsmPrinter (but after target-specific hooks).
virtual void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
/// Called after the assembly for the module is generated by
/// the AsmPrinter (but before target-specific hooks).
virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
virtual ~GCMetadataPrinter();
};
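// A hedged registration sketch, mirroring how in-tree printers plug into the
// registry above (all names here are illustrative):
//
//   class MyGCPrinter : public GCMetadataPrinter {
//     void finishAssembly(Module &M, GCModuleInfo &Info,
//                         AsmPrinter &AP) override { /* emit tables */ }
//   };
//   static GCMetadataPrinterRegistry::Add<MyGCPrinter>
//       X("mygc", "my collector's metadata printer");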
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineMemOperand.h | //==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/DataTypes.h"
namespace llvm {
class FoldingSetNodeID;
class MDNode;
class raw_ostream;
class ModuleSlotTracker;
/// MachinePointerInfo - This class contains a discriminated union of
/// information about pointers in memory operands, relating them back to LLVM IR
/// or to virtual locations (such as frame indices) that are exposed during
/// codegen.
struct MachinePointerInfo {
/// V - This is the IR pointer value for the access, or it is null if unknown.
/// If this is null, then the access is to a pointer in the default address
/// space.
PointerUnion<const Value *, const PseudoSourceValue *> V;
/// Offset - This is an offset from the base Value*.
int64_t Offset;
explicit MachinePointerInfo(const Value *v = nullptr, int64_t offset = 0)
: V(v), Offset(offset) {}
explicit MachinePointerInfo(const PseudoSourceValue *v,
int64_t offset = 0)
: V(v), Offset(offset) {}
MachinePointerInfo getWithOffset(int64_t O) const {
if (V.isNull()) return MachinePointerInfo();
if (V.is<const Value*>())
return MachinePointerInfo(V.get<const Value*>(), Offset+O);
return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O);
}
/// getAddrSpace - Return the LLVM IR address space number that this pointer
/// points into.
unsigned getAddrSpace() const;
/// getConstantPool - Return a MachinePointerInfo record that refers to the
/// constant pool.
static MachinePointerInfo getConstantPool();
/// getFixedStack - Return a MachinePointerInfo record that refers to the
/// specified FrameIndex.
static MachinePointerInfo getFixedStack(int FI, int64_t offset = 0);
/// getJumpTable - Return a MachinePointerInfo record that refers to a
/// jump table entry.
static MachinePointerInfo getJumpTable();
/// getGOT - Return a MachinePointerInfo record that refers to a
/// GOT entry.
static MachinePointerInfo getGOT();
/// getStack - stack pointer relative access.
static MachinePointerInfo getStack(int64_t Offset);
};
//===----------------------------------------------------------------------===//
/// MachineMemOperand - A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
MachinePointerInfo PtrInfo;
uint64_t Size;
unsigned Flags;
AAMDNodes AAInfo;
const MDNode *Ranges;
public:
/// Flags values. These may be or'd together.
enum MemOperandFlags {
/// The memory access reads data.
MOLoad = 1,
/// The memory access writes data.
MOStore = 2,
/// The memory access is volatile.
MOVolatile = 4,
/// The memory access is non-temporal.
MONonTemporal = 8,
/// The memory access is invariant.
MOInvariant = 16,
// Target hints allow target passes to annotate memory operations.
MOTargetStartBit = 5,
MOTargetNumBits = 3,
// This is the number of bits we need to represent flags.
MOMaxBits = 8
};
/// MachineMemOperand - Construct a MachineMemOperand object with the
/// specified PtrInfo, flags, size, and base alignment.
MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
unsigned base_alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
/// getValue - Return the base address of the memory access. This may either
/// be a normal LLVM IR Value, or one of the special values used in CodeGen.
/// Special values are those obtained via
/// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
/// other PseudoSourceValue member functions which return objects which stand
/// for frame/stack pointer relative references and other special references
/// which are not representable in the high-level IR.
const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
const PseudoSourceValue *getPseudoValue() const {
return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
}
const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
/// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }
/// Bitwise OR the current flags with the given flags.
void setFlags(unsigned f) { Flags |= (f & ((1 << MOMaxBits) - 1)); }
/// getOffset - For normal values, this is a byte offset added to the base
/// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
/// number.
int64_t getOffset() const { return PtrInfo.Offset; }
unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
/// getSize - Return the size in bytes of the memory reference.
uint64_t getSize() const { return Size; }
/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
uint64_t getAlignment() const;
/// getBaseAlignment - Return the minimum known alignment in bytes of the
/// base address, without the offset.
uint64_t getBaseAlignment() const { return (1u << (Flags >> MOMaxBits)) >> 1; }
/// getAAInfo - Return the AA tags for the memory reference.
AAMDNodes getAAInfo() const { return AAInfo; }
/// getRanges - Return the range tag for the memory reference.
const MDNode *getRanges() const { return Ranges; }
bool isLoad() const { return Flags & MOLoad; }
bool isStore() const { return Flags & MOStore; }
bool isVolatile() const { return Flags & MOVolatile; }
bool isNonTemporal() const { return Flags & MONonTemporal; }
bool isInvariant() const { return Flags & MOInvariant; }
/// isUnordered - Returns true if this memory operation doesn't have any
/// ordering constraints other than normal aliasing. Volatile and atomic
/// memory operations can't be reordered.
///
/// Currently, we don't model the difference between volatile and atomic
/// operations. They should retain their ordering relative to all memory
/// operations.
bool isUnordered() const { return !isVolatile(); }
/// refineAlignment - Update this MachineMemOperand to reflect the alignment
/// of MMO, if it has a greater alignment. This must only be used when the
/// new alignment applies to all users of this MachineMemOperand.
void refineAlignment(const MachineMemOperand *MMO);
/// setValue - Change the SourceValue for this MachineMemOperand. This
/// should only be used when an object is being relocated and all references
/// to it are being updated.
void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
/// Profile - Gather unique data for the object.
///
void Profile(FoldingSetNodeID &ID) const;
/// Support for operator<<.
/// @{
void print(raw_ostream &OS) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
/// @}
friend bool operator==(const MachineMemOperand &LHS,
const MachineMemOperand &RHS) {
return LHS.getValue() == RHS.getValue() &&
LHS.getPseudoValue() == RHS.getPseudoValue() &&
LHS.getSize() == RHS.getSize() &&
LHS.getOffset() == RHS.getOffset() &&
LHS.getFlags() == RHS.getFlags() &&
LHS.getAAInfo() == RHS.getAAInfo() &&
LHS.getRanges() == RHS.getRanges() &&
LHS.getAlignment() == RHS.getAlignment() &&
LHS.getAddrSpace() == RHS.getAddrSpace();
}
friend bool operator!=(const MachineMemOperand &LHS,
const MachineMemOperand &RHS) {
return !(LHS == RHS);
}
};
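// A hedged construction sketch: MachineMemOperands are normally created
// through MachineFunction rather than constructed directly. MF, FI, Size,
// and Align are assumed from the surrounding code:
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo::getFixedStack(FI),
//       MachineMemOperand::MOLoad, Size, Align);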
inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
MRO.print(OS);
return OS;
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/DIE.h | //===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Data structures for DWARF info entries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
#define LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/Dwarf.h"
#include <vector>
namespace llvm {
class AsmPrinter;
class MCExpr;
class MCSymbol;
class raw_ostream;
class DwarfTypeUnit;
//===--------------------------------------------------------------------===//
/// DIEAbbrevData - Dwarf abbreviation data, describes one attribute of a
/// Dwarf abbreviation.
class DIEAbbrevData {
/// Attribute - Dwarf attribute code.
///
dwarf::Attribute Attribute;
/// Form - Dwarf form code.
///
dwarf::Form Form;
public:
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
// Accessors.
dwarf::Attribute getAttribute() const { return Attribute; }
dwarf::Form getForm() const { return Form; }
/// Profile - Used to gather unique data for the abbreviation folding set.
///
void Profile(FoldingSetNodeID &ID) const;
};
//===--------------------------------------------------------------------===//
/// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
/// information object.
class DIEAbbrev : public FoldingSetNode {
/// Unique number for node.
///
unsigned Number;
/// Tag - Dwarf tag code.
///
dwarf::Tag Tag;
/// Children - Whether or not this node has children.
///
// This cheats a bit in all of the uses since the values in the standard
// are 0 and 1 for no children and children respectively.
bool Children;
/// Data - Raw data bytes for abbreviation.
///
SmallVector<DIEAbbrevData, 12> Data;
public:
DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C), Data() {}
// Accessors.
dwarf::Tag getTag() const { return Tag; }
unsigned getNumber() const { return Number; }
bool hasChildren() const { return Children; }
const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
void setChildrenFlag(bool hasChild) { Children = hasChild; }
void setNumber(unsigned N) { Number = N; }
/// AddAttribute - Adds another set of attribute information to the
/// abbreviation.
void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
Data.push_back(DIEAbbrevData(Attribute, Form));
}
/// Profile - Used to gather unique data for the abbreviation folding set.
///
void Profile(FoldingSetNodeID &ID) const;
/// Emit - Print the abbreviation using the specified asm printer.
///
void Emit(const AsmPrinter *AP) const;
#ifndef NDEBUG
void print(raw_ostream &O);
void dump();
#endif
};
//===--------------------------------------------------------------------===//
/// DIEInteger - An integer value DIE.
///
class DIEInteger {
uint64_t Integer;
public:
explicit DIEInteger(uint64_t I) : Integer(I) {}
/// BestForm - Choose the best form for integer.
///
static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
if (IsSigned) {
const int64_t SignedInt = Int;
if ((char)Int == SignedInt)
return dwarf::DW_FORM_data1;
if ((short)Int == SignedInt)
return dwarf::DW_FORM_data2;
if ((int)Int == SignedInt)
return dwarf::DW_FORM_data4;
} else {
if ((unsigned char)Int == Int)
return dwarf::DW_FORM_data1;
if ((unsigned short)Int == Int)
return dwarf::DW_FORM_data2;
if ((unsigned int)Int == Int)
return dwarf::DW_FORM_data4;
}
return dwarf::DW_FORM_data8;
}
uint64_t getValue() const { return Integer; }
void setValue(uint64_t Val) { Integer = Val; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
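// Illustrative checks of BestForm's behavior as defined above (assuming a
// typical 32-bit unsigned int):
//
//   assert(DIEInteger::BestForm(false, 200)        == dwarf::DW_FORM_data1);
//   assert(DIEInteger::BestForm(false, 70000)      == dwarf::DW_FORM_data4);
//   assert(DIEInteger::BestForm(false, 1ULL << 40) == dwarf::DW_FORM_data8);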
//===--------------------------------------------------------------------===//
/// DIEExpr - An expression DIE.
//
class DIEExpr {
const MCExpr *Expr;
public:
explicit DIEExpr(const MCExpr *E) : Expr(E) {}
/// getValue - Get MCExpr.
///
const MCExpr *getValue() const { return Expr; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIELabel - A label DIE.
//
class DIELabel {
const MCSymbol *Label;
public:
explicit DIELabel(const MCSymbol *L) : Label(L) {}
/// getValue - Get MCSymbol.
///
const MCSymbol *getValue() const { return Label; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIEDelta - A simple label difference DIE.
///
class DIEDelta {
const MCSymbol *LabelHi;
const MCSymbol *LabelLo;
public:
DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIEString - A container for string values.
///
class DIEString {
DwarfStringPoolEntryRef S;
public:
DIEString(DwarfStringPoolEntryRef S) : S(S) {}
/// getString - Grab the string out of the object.
StringRef getString() const { return S.getString(); }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIEEntry - A pointer to another debug information entry. An instance of
/// this class can also be used as a proxy for a debug information entry not
/// yet defined (i.e. types).
class DIE;
class DIEEntry {
DIE *Entry;
DIEEntry() = delete;
public:
explicit DIEEntry(DIE &E) : Entry(&E) {}
DIE &getEntry() const { return *Entry; }
/// Returns size of a ref_addr entry.
static unsigned getRefAddrSize(const AsmPrinter *AP);
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
return Form == dwarf::DW_FORM_ref_addr ? getRefAddrSize(AP)
: sizeof(int32_t);
}
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// \brief A signature reference to a type unit.
class DIETypeSignature {
const DwarfTypeUnit *Unit;
DIETypeSignature() = delete;
public:
explicit DIETypeSignature(const DwarfTypeUnit &Unit) : Unit(&Unit) {}
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
assert(Form == dwarf::DW_FORM_ref_sig8);
return 8;
}
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIELocList - Represents a pointer to a location list in the debug_loc
/// section.
//
class DIELocList {
// Index into the .debug_loc vector.
size_t Index;
public:
DIELocList(size_t I) : Index(I) {}
/// getValue - Grab the current index out.
size_t getValue() const { return Index; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIEValue - A debug information entry value. Some of these roughly correlate
/// to DWARF attribute classes.
///
class DIEBlock;
class DIELoc;
class DIEValue {
public:
enum Type {
isNone,
#define HANDLE_DIEVALUE(T) is##T,
#include "llvm/CodeGen/DIEValue.def"
};
private:
/// Ty - Type of data stored in the value.
///
Type Ty = isNone;
dwarf::Attribute Attribute = (dwarf::Attribute)0;
dwarf::Form Form = (dwarf::Form)0;
/// Storage for the value.
///
/// All values that aren't standard layout (or are larger than 8 bytes)
/// should be stored by reference instead of by value.
typedef AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
DIEDelta *, DIEEntry, DIETypeSignature,
DIEBlock *, DIELoc *, DIELocList> ValTy;
static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
sizeof(ValTy) <= sizeof(void *),
"Expected all large types to be stored via pointer");
/// Underlying stored value.
ValTy Val;
template <class T> void construct(T V) {
static_assert(std::is_standard_layout<T>::value ||
std::is_pointer<T>::value,
"Expected standard layout or pointer");
new (reinterpret_cast<void *>(Val.buffer)) T(V);
}
template <class T> T *get() { return reinterpret_cast<T *>(Val.buffer); }
template <class T> const T *get() const {
return reinterpret_cast<const T *>(Val.buffer);
}
template <class T> void destruct() { get<T>()->~T(); }
/// Destroy the underlying value.
///
/// This should get optimized down to a no-op. We could skip it if we could
/// add a static assert on \a std::is_trivially_copyable(), but we currently
/// support versions of GCC that don't understand that.
void destroyVal() {
switch (Ty) {
case isNone:
return;
#define HANDLE_DIEVALUE_SMALL(T) \
  case is##T: \
    destruct<DIE##T>(); \
    return;
#define HANDLE_DIEVALUE_LARGE(T) \
  case is##T: \
    destruct<const DIE##T *>(); \
    return;
#include "llvm/CodeGen/DIEValue.def"
}
}
/// Copy the underlying value.
///
/// This should get optimized down to a simple copy. We need to actually
/// construct the value, rather than calling memcpy, to satisfy strict
/// aliasing rules.
void copyVal(const DIEValue &X) {
switch (Ty) {
case isNone:
return;
#define HANDLE_DIEVALUE_SMALL(T) \
case is##T: \
construct<DIE##T>(*X.get<DIE##T>()); \
return;
#define HANDLE_DIEVALUE_LARGE(T) \
case is##T: \
construct<const DIE##T *>(*X.get<const DIE##T *>()); \
return;
#include "llvm/CodeGen/DIEValue.def"
}
}
public:
DIEValue() = default;
DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
copyVal(X);
}
DIEValue &operator=(const DIEValue &X) {
destroyVal();
Ty = X.Ty;
Attribute = X.Attribute;
Form = X.Form;
copyVal(X);
return *this;
}
~DIEValue() { destroyVal(); }
#define HANDLE_DIEVALUE_SMALL(T) \
DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V) \
: Ty(is##T), Attribute(Attribute), Form(Form) { \
construct<DIE##T>(V); \
}
#define HANDLE_DIEVALUE_LARGE(T) \
DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V) \
: Ty(is##T), Attribute(Attribute), Form(Form) { \
assert(V && "Expected valid value"); \
construct<const DIE##T *>(V); \
}
#include "llvm/CodeGen/DIEValue.def"
// Accessors
Type getType() const { return Ty; }
dwarf::Attribute getAttribute() const { return Attribute; }
dwarf::Form getForm() const { return Form; }
explicit operator bool() const { return Ty; }
#define HANDLE_DIEVALUE_SMALL(T) \
const DIE##T &getDIE##T() const { \
assert(getType() == is##T && "Expected " #T); \
return *get<DIE##T>(); \
}
#define HANDLE_DIEVALUE_LARGE(T) \
const DIE##T &getDIE##T() const { \
assert(getType() == is##T && "Expected " #T); \
return **get<const DIE##T *>(); \
}
#include "llvm/CodeGen/DIEValue.def"
/// EmitValue - Emit value via the Dwarf writer.
///
void EmitValue(const AsmPrinter *AP) const;
/// SizeOf - Return the size of a value in bytes.
///
unsigned SizeOf(const AsmPrinter *AP) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
void dump() const;
#endif
};
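// A hedged usage sketch: constructing a value and dispatching on its type
// (illustrative only):
//
//   DIEValue V(dwarf::DW_AT_byte_size, dwarf::DW_FORM_data1, DIEInteger(4));
//   if (V.getType() == DIEValue::isInteger)
//     uint64_t Bytes = V.getDIEInteger().getValue(); // 4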
struct IntrusiveBackListNode {
PointerIntPair<IntrusiveBackListNode *, 1> Next;
IntrusiveBackListNode() : Next(this, true) {}
IntrusiveBackListNode *getNext() const {
return Next.getInt() ? nullptr : Next.getPointer();
}
};
struct IntrusiveBackListBase {
typedef IntrusiveBackListNode Node;
Node *Last = nullptr;
bool empty() const { return !Last; }
void push_back(Node &N) {
assert(N.Next.getPointer() == &N && "Expected unlinked node");
assert(N.Next.getInt() && "Expected unlinked node"); // HLSL Change - was int == bool
if (Last) {
N.Next = Last->Next;
Last->Next.setPointerAndInt(&N, false);
}
Last = &N;
}
};
template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public:
using IntrusiveBackListBase::empty;
void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
T &back() { return *static_cast<T *>(Last); }
const T &back() const { return *static_cast<T *>(Last); }
class const_iterator;
class iterator
: public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
friend class const_iterator;
Node *N = nullptr;
public:
iterator() = default;
explicit iterator(T *N) : N(N) {}
iterator &operator++() {
N = N->getNext();
return *this;
}
explicit operator bool() const { return N; }
T &operator*() const { return *static_cast<T *>(N); }
bool operator==(const iterator &X) const { return N == X.N; }
bool operator!=(const iterator &X) const { return N != X.N; }
};
class const_iterator
: public iterator_facade_base<const_iterator, std::forward_iterator_tag,
const T> {
const Node *N = nullptr;
public:
const_iterator() = default;
// Placate MSVC by explicitly scoping 'iterator'.
const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
explicit const_iterator(const T *N) : N(N) {}
const_iterator &operator++() {
N = N->getNext();
return *this;
}
explicit operator bool() const { return N; }
const T &operator*() const { return *static_cast<const T *>(N); }
bool operator==(const const_iterator &X) const { return N == X.N; }
bool operator!=(const const_iterator &X) const { return N != X.N; }
};
iterator begin() {
return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
}
const_iterator begin() const {
return const_cast<IntrusiveBackList *>(this)->begin();
}
iterator end() { return iterator(); }
const_iterator end() const { return const_iterator(); }
static iterator toIterator(T &N) { return iterator(&N); }
static const_iterator toIterator(const T &N) { return const_iterator(&N); }
};
/// A list of DIE values.
///
/// This is a singly-linked list, but instead of reversing the order of
/// insertion, we keep a pointer to the back of the list so we can push in
/// order.
///
/// There are two main reasons to choose a linked list over a customized
/// vector-like data structure.
///
/// 1. For teardown efficiency, we want DIEs to be BumpPtrAllocated. Using a
/// linked list here makes this way easier to accomplish.
/// 2. Carrying an extra pointer per \a DIEValue isn't expensive. 45% of DIEs
/// have 2 or fewer values, and 90% have 5 or fewer. A vector would be
/// over-allocated by 50% on average anyway, the same cost as the
/// linked-list node.
class DIEValueList {
struct Node : IntrusiveBackListNode {
DIEValue V;
explicit Node(DIEValue V) : V(V) {}
};
typedef IntrusiveBackList<Node> ListTy;
ListTy List;
public:
bool empty() const { return List.empty(); }
class const_iterator;
class iterator
: public iterator_adaptor_base<iterator, ListTy::iterator,
std::forward_iterator_tag, DIEValue> {
friend class const_iterator;
typedef iterator_adaptor_base<iterator, ListTy::iterator,
std::forward_iterator_tag,
DIEValue> iterator_adaptor;
public:
iterator() = default;
explicit iterator(ListTy::iterator X) : iterator_adaptor(X) {}
explicit operator bool() const { return bool(wrapped()); }
DIEValue &operator*() const { return wrapped()->V; }
};
class const_iterator
: public iterator_adaptor_base<const_iterator, ListTy::const_iterator,
std::forward_iterator_tag,
const DIEValue> {
typedef iterator_adaptor_base<const_iterator, ListTy::const_iterator,
std::forward_iterator_tag,
const DIEValue> iterator_adaptor;
public:
const_iterator() = default;
const_iterator(DIEValueList::iterator X) : iterator_adaptor(X.wrapped()) {}
explicit const_iterator(ListTy::const_iterator X) : iterator_adaptor(X) {}
explicit operator bool() const { return bool(wrapped()); }
const DIEValue &operator*() const { return wrapped()->V; }
};
iterator insert(BumpPtrAllocator &Alloc, DIEValue V) {
List.push_back(*new (Alloc) Node(V));
return iterator(ListTy::toIterator(List.back()));
}
template <class... Ts>
iterator emplace(BumpPtrAllocator &Alloc, Ts &&... Args) {
return insert(Alloc, DIEValue(std::forward<Ts>(Args)...));
}
iterator begin() { return iterator(List.begin()); }
iterator end() { return iterator(List.end()); }
const_iterator begin() const { return const_iterator(List.begin()); }
const_iterator end() const { return const_iterator(List.end()); }
};
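// A hedged usage sketch of the list above; Alloc is an assumed
// BumpPtrAllocator owned by the DWARF emitter:
//
//   DIEValueList Values;
//   Values.emplace(Alloc, dwarf::DW_AT_decl_line, dwarf::DW_FORM_udata,
//                  DIEInteger(42));
//   for (const DIEValue &V : Values) { /* size or emit each value */ }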
//===--------------------------------------------------------------------===//
/// DIE - A structured debug information entry. Has an abbreviation which
/// describes its organization.
class DIE : IntrusiveBackListNode {
friend class IntrusiveBackList<DIE>;
protected:
/// Offset - Offset in debug info section.
///
unsigned Offset;
/// Size - Size of instance + children.
///
unsigned Size;
unsigned AbbrevNumber = ~0u;
/// Tag - Dwarf tag code.
///
dwarf::Tag Tag = (dwarf::Tag)0;
/// Children DIEs.
IntrusiveBackList<DIE> Children;
DIE *Parent = nullptr;
/// Attribute values.
///
DIEValueList Values;
protected:
DIE() : Offset(0), Size(0) {}
private:
explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag) {}
public:
static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
return new (Alloc) DIE(Tag);
}
// Accessors.
unsigned getAbbrevNumber() const { return AbbrevNumber; }
dwarf::Tag getTag() const { return Tag; }
unsigned getOffset() const { return Offset; }
unsigned getSize() const { return Size; }
bool hasChildren() const { return !Children.empty(); }
typedef IntrusiveBackList<DIE>::iterator child_iterator;
typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator;
typedef iterator_range<child_iterator> child_range;
typedef iterator_range<const_child_iterator> const_child_range;
child_range children() {
return llvm::make_range(Children.begin(), Children.end());
}
const_child_range children() const {
return llvm::make_range(Children.begin(), Children.end());
}
typedef DIEValueList::iterator value_iterator;
typedef iterator_range<value_iterator> value_range;
value_range values() {
return llvm::make_range(Values.begin(), Values.end());
}
typedef DIEValueList::const_iterator const_value_iterator;
typedef iterator_range<const_value_iterator> const_value_range;
const_value_range values() const {
return llvm::make_range(Values.begin(), Values.end());
}
DIE *getParent() const { return Parent; }
/// Generate the abbreviation for this DIE.
///
/// Calculate the abbreviation for this, which should be uniqued and
/// eventually used to call \a setAbbrevNumber().
DIEAbbrev generateAbbrev() const;
/// Set the abbreviation number for this DIE.
void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }
/// Climb up the parent chain to get the compile or type unit DIE this DIE
/// belongs to.
const DIE *getUnit() const;
/// Similar to getUnit, returns null when DIE is not added to an
/// owner yet.
const DIE *getUnitOrNull() const;
void setOffset(unsigned O) { Offset = O; }
void setSize(unsigned S) { Size = S; }
/// addValue - Add a value and attributes to a DIE.
///
value_iterator addValue(BumpPtrAllocator &Alloc, DIEValue Value) {
return Values.insert(Alloc, Value);
}
template <class T>
value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
dwarf::Form Form, T &&Value) {
return Values.emplace(Alloc, Attribute, Form, std::forward<T>(Value));
}
/// Add a child to the DIE.
DIE &addChild(DIE *Child) {
assert(!Child->getParent() && "Child should be orphaned");
Child->Parent = this;
Children.push_back(*Child);
return Children.back();
}
/// Find a value in the DIE with the attribute given.
///
/// Returns a default-constructed DIEValue (where \a DIEValue::getType()
/// gives \a DIEValue::isNone) if no such attribute exists.
DIEValue findAttribute(dwarf::Attribute Attribute) const;
#ifndef NDEBUG
void print(raw_ostream &O, unsigned IndentCount = 0) const;
void dump();
#endif
};
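// A hedged construction sketch: building a small DIE tree with the
// BumpPtrAllocator-based API above (Alloc is assumed):
//
//   DIE *Var = DIE::get(Alloc, dwarf::DW_TAG_variable);
//   Var->addValue(Alloc, dwarf::DW_AT_decl_line, dwarf::DW_FORM_udata,
//                 DIEInteger(17));
//   DIE *CU = DIE::get(Alloc, dwarf::DW_TAG_compile_unit);
//   CU->addChild(Var);
//   assert(Var->getParent() == CU);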
//===--------------------------------------------------------------------===//
/// DIELoc - Represents an expression location.
//
class DIELoc : public DIE {
mutable unsigned Size; // Size in bytes excluding size header.
public:
DIELoc() : Size(0) {}
/// ComputeSize - Calculate the size of the location expression.
///
unsigned ComputeSize(const AsmPrinter *AP) const;
/// BestForm - Choose the best form for data.
///
dwarf::Form BestForm(unsigned DwarfVersion) const {
if (DwarfVersion > 3)
return dwarf::DW_FORM_exprloc;
// Pre-DWARF4 location expressions were blocks and not exprloc.
if ((unsigned char)Size == Size)
return dwarf::DW_FORM_block1;
if ((unsigned short)Size == Size)
return dwarf::DW_FORM_block2;
if ((unsigned int)Size == Size)
return dwarf::DW_FORM_block4;
return dwarf::DW_FORM_block;
}
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
//===--------------------------------------------------------------------===//
/// DIEBlock - Represents a block of values.
//
class DIEBlock : public DIE {
mutable unsigned Size; // Size in bytes excluding size header.
public:
DIEBlock() : Size(0) {}
/// ComputeSize - Calculate the size of the location expression.
///
unsigned ComputeSize(const AsmPrinter *AP) const;
/// BestForm - Choose the best form for data.
///
dwarf::Form BestForm() const {
if ((unsigned char)Size == Size)
return dwarf::DW_FORM_block1;
if ((unsigned short)Size == Size)
return dwarf::DW_FORM_block2;
if ((unsigned int)Size == Size)
return dwarf::DW_FORM_block4;
return dwarf::DW_FORM_block;
}
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
#endif
};
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineDominators.h | //=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
// but for target-specific code rather than target-independent IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
#define LLVM_CODEGEN_MACHINEDOMINATORS_H
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
namespace llvm {
template<>
inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) {
this->Roots.push_back(MBB);
}
extern template class DomTreeNodeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock>;
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
//===----------------------------------------------------------------------===//
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
/// compute a normal dominator tree.
///
class MachineDominatorTree : public MachineFunctionPass {
/// \brief Helper structure used to hold all the basic blocks
/// involved in the split of a critical edge.
struct CriticalEdge {
MachineBasicBlock *FromBB;
MachineBasicBlock *ToBB;
MachineBasicBlock *NewBB;
};
/// \brief Pile up all the critical edges to be split.
/// The splitting of a critical edge is local and thus, it is possible
/// to apply several of those changes at the same time.
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
/// \brief Remember all the basic blocks that are inserted during
/// edge splitting.
/// Invariant: NewBBs == all the basic blocks contained in the NewBB
/// field of all the elements of CriticalEdgesToSplit.
/// I.e., for all elt in CriticalEdgesToSplit, there exists BB in NewBBs
/// such that BB == elt.NewBB.
mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
/// \brief Apply all the recorded critical edges to the DT.
/// This updates the underlying DT information in a way that uses
/// the fast query path of DT as much as possible.
///
/// \post CriticalEdgesToSplit.empty().
void applySplitCriticalEdges() const;
public:
static char ID; // Pass ID, replacement for typeid
DominatorTreeBase<MachineBasicBlock>* DT;
MachineDominatorTree();
~MachineDominatorTree() override;
DominatorTreeBase<MachineBasicBlock> &getBase() {
applySplitCriticalEdges();
return *DT;
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// getRoots - Return the root blocks of the current CFG. This may include
/// multiple blocks if we are computing post dominators. For forward
/// dominators, this will always be a single block (the entry node).
///
inline const std::vector<MachineBasicBlock*> &getRoots() const {
applySplitCriticalEdges();
return DT->getRoots();
}
inline MachineBasicBlock *getRoot() const {
applySplitCriticalEdges();
return DT->getRoot();
}
inline MachineDomTreeNode *getRootNode() const {
applySplitCriticalEdges();
return DT->getRootNode();
}
bool runOnMachineFunction(MachineFunction &F) override;
inline bool dominates(const MachineDomTreeNode* A,
const MachineDomTreeNode* B) const {
applySplitCriticalEdges();
return DT->dominates(A, B);
}
inline bool dominates(const MachineBasicBlock* A,
const MachineBasicBlock* B) const {
applySplitCriticalEdges();
return DT->dominates(A, B);
}
// dominates - Return true if A dominates B. This performs the
// special checks necessary if A and B are in the same basic block.
bool dominates(const MachineInstr *A, const MachineInstr *B) const {
applySplitCriticalEdges();
const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
if (BBA != BBB) return DT->dominates(BBA, BBB);
// Loop through the basic block until we find A or B.
MachineBasicBlock::const_iterator I = BBA->begin();
for (; &*I != A && &*I != B; ++I)
/*empty*/ ;
// A dominates B if it is found first in the basic block.
return &*I == A;
}
inline bool properlyDominates(const MachineDomTreeNode* A,
const MachineDomTreeNode* B) const {
applySplitCriticalEdges();
return DT->properlyDominates(A, B);
}
inline bool properlyDominates(const MachineBasicBlock* A,
const MachineBasicBlock* B) const {
applySplitCriticalEdges();
return DT->properlyDominates(A, B);
}
/// findNearestCommonDominator - Find nearest common dominator basic block
/// for basic block A and B. If there is no such block then return NULL.
inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
MachineBasicBlock *B) {
applySplitCriticalEdges();
return DT->findNearestCommonDominator(A, B);
}
inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
applySplitCriticalEdges();
return DT->getNode(BB);
}
/// getNode - return the (Post)DominatorTree node for the specified basic
/// block. This is the same as using operator[] on this class.
///
inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
applySplitCriticalEdges();
return DT->getNode(BB);
}
/// addNewBlock - Add a new node to the dominator tree information. This
/// creates a new node as a child of DomBB dominator node,linking it into
/// the children list of the immediate dominator.
inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
MachineBasicBlock *DomBB) {
applySplitCriticalEdges();
return DT->addNewBlock(BB, DomBB);
}
/// changeImmediateDominator - This method is used to update the dominator
/// tree information when a node's immediate dominator changes.
///
inline void changeImmediateDominator(MachineBasicBlock *N,
MachineBasicBlock* NewIDom) {
applySplitCriticalEdges();
DT->changeImmediateDominator(N, NewIDom);
}
inline void changeImmediateDominator(MachineDomTreeNode *N,
MachineDomTreeNode* NewIDom) {
applySplitCriticalEdges();
DT->changeImmediateDominator(N, NewIDom);
}
/// eraseNode - Removes a node from the dominator tree. Block must not
/// dominate any other blocks. Removes node from its immediate dominator's
/// children list. Deletes dominator node associated with basic block BB.
inline void eraseNode(MachineBasicBlock *BB) {
applySplitCriticalEdges();
DT->eraseNode(BB);
}
/// splitBlock - BB is split and now it has one successor. Update dominator
/// tree to reflect this change.
inline void splitBlock(MachineBasicBlock* NewBB) {
applySplitCriticalEdges();
DT->splitBlock(NewBB);
}
/// isReachableFromEntry - Return true if A is dominated by the entry
/// block of the function containing it.
bool isReachableFromEntry(const MachineBasicBlock *A) {
applySplitCriticalEdges();
return DT->isReachableFromEntry(A);
}
void releaseMemory() override;
void print(raw_ostream &OS, const Module*) const override;
/// \brief Record that the critical edge (FromBB, ToBB) has been
/// split with NewBB.
/// It is best to use this method instead of updating the underlying
/// information directly, because it helps reduce the number of times the
/// DT information is invalidated.
///
/// \note Do not use this method with regular edges.
///
/// \note To benefit from the compile time improvement incurred by this
/// method, the users of this method have to limit the queries to the DT
/// interface between two edges splitting. In other words, they have to
/// pack the splitting of critical edges as much as possible.
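///
/// A sketch of the intended batching pattern (illustrative; NewBB is
/// assumed to have been produced by some edge-splitting utility for the
/// edge (FromBB, ToBB)):
/// \code
///   MDT.recordSplitCriticalEdge(FromBB, ToBB, NewBB);
///   // ... record further splits; the tree is only updated lazily on the
///   // next dominance query.
/// \endcode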
void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
MachineBasicBlock *ToBB,
MachineBasicBlock *NewBB) {
bool Inserted = NewBBs.insert(NewBB).second;
(void)Inserted;
assert(Inserted &&
"A basic block inserted via edge splitting cannot appear twice");
CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
}
};
//===-------------------------------------
/// DominatorTree GraphTraits specialization so the DominatorTree can be
/// iterable by generic graph iterators.
///
template<class T> struct GraphTraits;
template <> struct GraphTraits<MachineDomTreeNode *> {
typedef MachineDomTreeNode NodeType;
typedef NodeType::iterator ChildIteratorType;
static NodeType *getEntryNode(NodeType *N) {
return N;
}
static inline ChildIteratorType child_begin(NodeType* N) {
return N->begin();
}
static inline ChildIteratorType child_end(NodeType* N) {
return N->end();
}
};
template <> struct GraphTraits<MachineDominatorTree*>
: public GraphTraits<MachineDomTreeNode *> {
static NodeType *getEntryNode(MachineDominatorTree *DT) {
return DT->getRootNode();
}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LivePhysRegs.h | //===- llvm/CodeGen/LivePhysRegs.h - Live Physical Register Set -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LivePhysRegs utility for tracking liveness of
// physical registers. This can be used for ad-hoc liveness tracking after
// register allocation. You can start with the live-ins/live-outs at the
// beginning/end of a block and update the information while walking the
// instructions inside the block. This implementation tracks the liveness on a
// sub-register granularity.
//
// We assume that the high bits of a physical super-register are not preserved
// unless the instruction has an implicit-use operand reading the super-
// register.
//
// X86 Example:
// %YMM0<def> = ...
// %XMM0<def> = ... (Kills %XMM0, all of %XMM0's sub-registers, and %YMM0)
//
// %YMM0<def> = ...
// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
#define LLVM_CODEGEN_LIVEPHYSREGS_H
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
namespace llvm {
class MachineInstr;
/// \brief A set of live physical registers with functions to track liveness
/// when walking backward/forward through a basic block.
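///
/// A minimal backward-walk sketch (illustrative only; MBB and TRI are
/// assumed to be in scope):
/// \code
///   LivePhysRegs LiveRegs(TRI);
///   LiveRegs.addLiveOuts(&MBB);
///   for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I)
///     LiveRegs.stepBackward(*I);
///   // LiveRegs now holds the registers live at the top of MBB.
/// \endcode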
class LivePhysRegs {
const TargetRegisterInfo *TRI;
SparseSet<unsigned> LiveRegs;
LivePhysRegs(const LivePhysRegs&) = delete;
LivePhysRegs &operator=(const LivePhysRegs&) = delete;
public:
/// \brief Constructs a new empty LivePhysRegs set.
LivePhysRegs() : TRI(nullptr), LiveRegs() {}
/// \brief Constructs and initialize an empty LivePhysRegs set.
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
assert(TRI && "Invalid TargetRegisterInfo pointer.");
LiveRegs.setUniverse(TRI->getNumRegs());
}
/// \brief Clear and initialize the LivePhysRegs set.
void init(const TargetRegisterInfo *TRI) {
assert(TRI && "Invalid TargetRegisterInfo pointer.");
this->TRI = TRI;
LiveRegs.clear();
LiveRegs.setUniverse(TRI->getNumRegs());
}
/// \brief Clears the LivePhysRegs set.
void clear() { LiveRegs.clear(); }
/// \brief Returns true if the set is empty.
bool empty() const { return LiveRegs.empty(); }
/// \brief Adds a physical register and all its sub-registers to the set.
void addReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
LiveRegs.insert(*SubRegs);
}
/// \brief Removes a physical register, all its sub-registers, and all its
/// super-registers from the set.
void removeReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
LiveRegs.erase(*SubRegs);
for (MCSuperRegIterator SuperRegs(Reg, TRI, /*IncludeSelf=*/false);
SuperRegs.isValid(); ++SuperRegs)
LiveRegs.erase(*SuperRegs);
}
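// E.g., on X86-64, removeReg(EAX) erases EAX and its sub-registers (such as
// AX, AH, AL) as well as its super-register RAX.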
/// \brief Removes physical registers clobbered by the regmask operand @p MO.
void removeRegsInMask(const MachineOperand &MO,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers);
/// \brief Returns true if register @p Reg is contained in the set. This also
/// works if only the super register of @p Reg has been defined, because we
/// always also add all sub-registers to the set.
bool contains(unsigned Reg) const { return LiveRegs.count(Reg); }
/// \brief Simulates liveness when stepping backwards over an
/// instruction (bundle): Remove defs, add uses. This is the recommended way of
/// calculating liveness.
void stepBackward(const MachineInstr &MI);
/// \brief Simulates liveness when stepping forward over an
/// instruction (bundle): Remove killed-uses, add defs. This is not the
/// recommended way because it depends on accurate kill flags. If possible,
/// use stepBackward() instead of this function.
/// The clobbers set will be the list of registers either defined or clobbered
/// by a regmask. The operand will identify whether this is a regmask or
/// register operand.
void stepForward(const MachineInstr &MI,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
/// \brief Adds all live-in registers of basic block @p MBB; After prologue/
/// epilogue insertion \p AddPristines should be set to true to insert the
/// pristine registers.
void addLiveIns(const MachineBasicBlock *MBB, bool AddPristines = false);
/// \brief Adds all live-out registers of basic block @p MBB; After prologue/
/// epilogue insertion \p AddPristines should be set to true to insert the
/// pristine registers.
void addLiveOuts(const MachineBasicBlock *MBB, bool AddPristines = false);
typedef SparseSet<unsigned>::const_iterator const_iterator;
const_iterator begin() const { return LiveRegs.begin(); }
const_iterator end() const { return LiveRegs.end(); }
/// \brief Prints the currently live registers to @p OS.
void print(raw_ostream &OS) const;
/// \brief Dumps the currently live registers to the debug output.
void dump() const;
};
inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
LR.print(OS);
return OS;
}
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineFrameInfo.h | //===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The file defines the MachineFrameInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <vector>
namespace llvm {
class raw_ostream;
class DataLayout;
class TargetRegisterClass;
class Type;
class MachineFunction;
class MachineBasicBlock;
class TargetFrameLowering;
class TargetMachine;
class BitVector;
class Value;
class AllocaInst;
/// The CalleeSavedInfo class tracks the information needed to locate where
/// a callee saved register is in the current frame.
class CalleeSavedInfo {
unsigned Reg;
int FrameIdx;
public:
explicit CalleeSavedInfo(unsigned R, int FI = 0)
: Reg(R), FrameIdx(FI) {}
// Accessors.
unsigned getReg() const { return Reg; }
int getFrameIdx() const { return FrameIdx; }
void setFrameIdx(int FI) { FrameIdx = FI; }
};
/// The MachineFrameInfo class represents an abstract stack frame until
/// prolog/epilog code is inserted. This class is key to allowing stack frame
/// representation optimizations, such as frame pointer elimination. It also
/// allows more mundane (but still important) optimizations, such as reordering
/// of abstract objects on the stack frame.
///
/// To support this, the class assigns unique integer identifiers to stack
/// objects requested by clients. These identifiers are negative integers for
/// fixed stack objects (such as arguments passed on the stack) or nonnegative
/// for objects that may be reordered. Instructions which refer to stack
/// objects use a special MO_FrameIndex operand to represent these frame
/// indexes.
///
/// Because this class keeps track of all references to the stack frame, it
/// knows when a variable sized object is allocated on the stack. This is the
/// sole condition which prevents frame pointer elimination, which is an
/// important optimization on register-poor architectures. Because original
/// variable sized alloca's in the source program are the only source of
/// variable sized stack objects, it is safe to decide whether there will be
/// any variable sized objects before all stack objects are known (for
/// example, register allocator spill code never needs variable sized
/// objects).
///
/// When prolog/epilog code emission is performed, the final stack frame is
/// built and the machine instructions are modified to refer to the actual
/// stack offsets of the object, eliminating all MO_FrameIndex operands from
/// the program.
///
/// @brief Abstract Stack Frame Information
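///
/// A minimal sketch of typical use from a target pass (illustrative only;
/// MF is assumed to be the current MachineFunction):
/// \code
///   MachineFrameInfo *MFI = MF.getFrameInfo();
///   int FI = MFI->CreateStackObject(/*Size=*/8, /*Alignment=*/8,
///                                   /*isSS=*/false);
///   // FI >= 0: ordinary, reorderable object; fixed objects get negative
///   // indices.
/// \endcode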
class MachineFrameInfo {
// Represent a single object allocated on the stack.
struct StackObject {
// The offset of this object from the stack pointer on entry to
// the function. This field has no meaning for a variable sized element.
int64_t SPOffset;
// The size of this object on the stack. 0 means a variable sized object,
// ~0ULL means a dead object.
uint64_t Size;
// The required alignment of this stack slot.
unsigned Alignment;
// If true, the value of the stack object is set before
// entering the function and is not modified inside the function. By
// default, fixed objects are immutable unless marked otherwise.
bool isImmutable;
// If true the stack object is used as spill slot. It
// cannot alias any other memory objects.
bool isSpillSlot;
/// If this stack object is originated from an Alloca instruction
/// this value saves the original IR allocation. Can be NULL.
const AllocaInst *Alloca;
// If true, the object was mapped into the local frame
// block and doesn't need additional handling for allocation beyond that.
bool PreAllocated;
// If true, an LLVM IR value might point to this object.
// Normally, spill slots and fixed-offset objects don't alias IR-accessible
// objects, but there are exceptions (on PowerPC, for example, some byval
// arguments have ABI-prescribed offsets).
bool isAliased;
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
bool isSS, const AllocaInst *Val, bool A)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
isSpillSlot(isSS), Alloca(Val), PreAllocated(false), isAliased(A) {}
};
/// The alignment of the stack.
unsigned StackAlignment;
/// Can the stack be realigned.
bool StackRealignable;
/// The list of stack objects allocated.
std::vector<StackObject> Objects;
/// This contains the number of fixed objects contained on
/// the stack. Because fixed objects are stored at a negative index in the
/// Objects list, this is also the index to the 0th object in the list.
unsigned NumFixedObjects;
/// This boolean keeps track of whether any variable
/// sized objects have been allocated yet.
bool HasVarSizedObjects;
/// This boolean keeps track of whether there is a call
/// to builtin \@llvm.frameaddress.
bool FrameAddressTaken;
/// This boolean keeps track of whether there is a call
/// to builtin \@llvm.returnaddress.
bool ReturnAddressTaken;
/// This boolean keeps track of whether there is a call
/// to builtin \@llvm.experimental.stackmap.
bool HasStackMap;
/// This boolean keeps track of whether there is a call
/// to builtin \@llvm.experimental.patchpoint.
bool HasPatchPoint;
/// The prolog/epilog code inserter calculates the final stack
/// offsets for all of the fixed size objects, updating the Objects list
/// above. It then updates StackSize to contain the number of bytes that need
/// to be allocated on entry to the function.
uint64_t StackSize;
/// The amount that a frame offset needs to be adjusted to
/// have the actual offset from the stack/frame pointer. The exact usage of
/// this is target-dependent, but it is typically used to adjust between
/// SP-relative and FP-relative offsets. E.g., if objects are accessed via
/// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
/// to the distance between the initial SP and the value in FP. For many
/// targets, this value is only used when generating debug info (via
/// TargetRegisterInfo::getFrameIndexOffset); when generating code, the
/// corresponding adjustments are performed directly.
int OffsetAdjustment;
/// The prolog/epilog code inserter may process objects that require greater
/// alignment than the default alignment the target provides.
/// To handle this, MaxAlignment is set to the maximum alignment
/// needed by the objects on the current frame. If this is greater than the
/// native alignment maintained by the compiler, dynamic alignment code will
/// be needed.
///
unsigned MaxAlignment;
/// Set to true if this function adjusts the stack -- e.g.,
/// when calling another function. This is only valid during and after
/// prolog/epilog code insertion.
bool AdjustsStack;
/// Set to true if this function has any function calls.
bool HasCalls;
/// The frame index for the stack protector.
int StackProtectorIdx;
/// The frame index for the function context. Used for SjLj exceptions.
int FunctionContextIdx;
/// This contains the size of the largest call frame if the target uses frame
/// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
/// class). This information is important for frame pointer elimination.
/// It is only valid during and after prolog/epilog code insertion.
unsigned MaxCallFrameSize;
/// The prolog/epilog code inserter fills in this vector with each
/// callee saved register saved in the frame. Beyond its use by the prolog/
/// epilog code inserter, this data is used for debug info and exception
/// handling.
std::vector<CalleeSavedInfo> CSInfo;
/// Has CSInfo been set yet?
bool CSIValid;
/// References to frame indices which are mapped
/// into the local frame allocation block. <FrameIdx, LocalOffset>
SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
/// Size of the pre-allocated local frame block.
int64_t LocalFrameSize;
/// Required alignment of the local object blob, which is the strictest
/// alignment of any object in it.
unsigned LocalFrameMaxAlign;
/// Whether the local object blob needs to be allocated together. If not,
/// PEI should ignore the isPreAllocated flags on the stack objects and
/// just allocate them normally.
bool UseLocalStackAllocationBlock;
/// Whether the "realign-stack" option is on.
bool RealignOption;
/// True if the function dynamically adjusts the stack pointer through some
/// opaque mechanism like inline assembly or Win32 EH.
bool HasOpaqueSPAdjustment;
/// True if the function contains a call to the llvm.vastart intrinsic.
bool HasVAStart;
/// True if this is a varargs function that contains a musttail call.
bool HasMustTailInVarArgFunc;
/// True if this function contains a tail call. If so, immutable objects
/// like function arguments are no longer truly immutable: a tail call *can*
/// overwrite fixed stack objects such as arguments, so we can't treat them
/// as immutable.
bool HasTailCall;
/// Not null, if shrink-wrapping found a better place for the prologue.
MachineBasicBlock *Save;
/// Not null, if shrink-wrapping found a better place for the epilogue.
MachineBasicBlock *Restore;
public:
explicit MachineFrameInfo(unsigned StackAlign, bool isStackRealign,
bool RealignOpt)
: StackAlignment(StackAlign), StackRealignable(isStackRealign),
RealignOption(RealignOpt) {
StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
HasVarSizedObjects = false;
FrameAddressTaken = false;
ReturnAddressTaken = false;
HasStackMap = false;
HasPatchPoint = false;
AdjustsStack = false;
HasCalls = false;
StackProtectorIdx = -1;
FunctionContextIdx = -1;
MaxCallFrameSize = 0;
CSIValid = false;
LocalFrameSize = 0;
LocalFrameMaxAlign = 0;
UseLocalStackAllocationBlock = false;
HasOpaqueSPAdjustment = false;
HasVAStart = false;
HasMustTailInVarArgFunc = false;
Save = nullptr;
Restore = nullptr;
HasTailCall = false;
}
/// Return true if there are any stack objects in this function.
bool hasStackObjects() const { return !Objects.empty(); }
/// This method may be called any time after instruction
/// selection is complete to determine if the stack frame for this function
/// contains any variable sized objects.
bool hasVarSizedObjects() const { return HasVarSizedObjects; }
/// Return the index for the stack protector object.
int getStackProtectorIndex() const { return StackProtectorIdx; }
void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
/// Return the index for the function context object.
/// This object is used for SjLj exceptions.
int getFunctionContextIndex() const { return FunctionContextIdx; }
void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
/// This method may be called any time after instruction
/// selection is complete to determine if there is a call to
/// \@llvm.frameaddress in this function.
bool isFrameAddressTaken() const { return FrameAddressTaken; }
void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
/// This method may be called any time after
/// instruction selection is complete to determine if there is a call to
/// \@llvm.returnaddress in this function.
bool isReturnAddressTaken() const { return ReturnAddressTaken; }
void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
/// This method may be called any time after instruction
/// selection is complete to determine if there is a call to builtin
/// \@llvm.experimental.stackmap.
bool hasStackMap() const { return HasStackMap; }
void setHasStackMap(bool s = true) { HasStackMap = s; }
/// This method may be called any time after instruction
/// selection is complete to determine if there is a call to builtin
/// \@llvm.experimental.patchpoint.
bool hasPatchPoint() const { return HasPatchPoint; }
void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }
/// Return the minimum frame object index.
int getObjectIndexBegin() const { return -NumFixedObjects; }
/// Return one past the maximum frame object index.
int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
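// Example of the index arithmetic used throughout this class: with 2 fixed
// objects and 3 ordinary ones, valid indices run from -2 to 2 inclusive,
// and index I maps to Objects[I + NumFixedObjects].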
/// Return the number of fixed objects.
unsigned getNumFixedObjects() const { return NumFixedObjects; }
/// Return the number of objects.
unsigned getNumObjects() const { return Objects.size(); }
/// Map a frame index into the local object block
void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
}
/// Get the local offset mapping for an object.
std::pair<int, int64_t> getLocalFrameObjectMap(int i) {
assert (i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
"Invalid local object reference!");
return LocalFrameObjects[i];
}
/// Return the number of objects allocated into the local object block.
int64_t getLocalFrameObjectCount() { return LocalFrameObjects.size(); }
/// Set the size of the local object blob.
void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
/// Get the size of the local object blob.
int64_t getLocalFrameSize() const { return LocalFrameSize; }
/// Required alignment of the local object blob,
/// which is the strictest alignment of any object in it.
void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
/// Return the required alignment of the local object blob.
unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
/// Get whether the local allocation blob should be allocated together or
/// let PEI allocate the locals in it directly.
bool getUseLocalStackAllocationBlock() {return UseLocalStackAllocationBlock;}
/// setUseLocalStackAllocationBlock - Set whether the local allocation blob
/// should be allocated together or let PEI allocate the locals in it
/// directly.
void setUseLocalStackAllocationBlock(bool v) {
UseLocalStackAllocationBlock = v;
}
/// Return true if the object was pre-allocated into the local block.
bool isObjectPreAllocated(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
}
/// Return the size of the specified object.
int64_t getObjectSize(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].Size;
}
/// Change the size of the specified stack object.
void setObjectSize(int ObjectIdx, int64_t Size) {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
Objects[ObjectIdx+NumFixedObjects].Size = Size;
}
/// Return the alignment of the specified stack object.
unsigned getObjectAlignment(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].Alignment;
}
/// setObjectAlignment - Change the alignment of the specified stack object.
void setObjectAlignment(int ObjectIdx, unsigned Align) {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
ensureMaxAlignment(Align);
}
/// Return the underlying Alloca of the specified
/// stack object if it exists. Returns 0 if none exists.
const AllocaInst* getObjectAllocation(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].Alloca;
}
/// Return the assigned stack offset of the specified object
/// from the incoming stack pointer.
int64_t getObjectOffset(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
assert(!isDeadObjectIndex(ObjectIdx) &&
"Getting frame offset for a dead object?");
return Objects[ObjectIdx+NumFixedObjects].SPOffset;
}
/// Set the stack frame offset of the specified object. The
/// offset is relative to the stack pointer on entry to the function.
void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
assert(!isDeadObjectIndex(ObjectIdx) &&
"Setting frame offset for a dead object?");
Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
}
/// Return the number of bytes that must be allocated to hold
/// all of the fixed size frame objects. This is only valid after
/// Prolog/Epilog code insertion has finalized the stack frame layout.
uint64_t getStackSize() const { return StackSize; }
/// Set the size of the stack.
void setStackSize(uint64_t Size) { StackSize = Size; }
/// Estimate and return the size of the stack frame.
unsigned estimateStackSize(const MachineFunction &MF) const;
/// Return the correction for frame offsets.
int getOffsetAdjustment() const { return OffsetAdjustment; }
/// Set the correction for frame offsets.
void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
/// Return the alignment in bytes that this function must be aligned to,
/// which is greater than the default stack alignment provided by the target.
unsigned getMaxAlignment() const { return MaxAlignment; }
/// Make sure the function is at least Align bytes aligned.
void ensureMaxAlignment(unsigned Align);
/// Return true if this function adjusts the stack -- e.g.,
/// when calling another function. This is only valid during and after
/// prolog/epilog code insertion.
bool adjustsStack() const { return AdjustsStack; }
void setAdjustsStack(bool V) { AdjustsStack = V; }
/// Return true if the current function has any function calls.
bool hasCalls() const { return HasCalls; }
void setHasCalls(bool V) { HasCalls = V; }
/// Returns true if the function contains opaque dynamic stack adjustments.
bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }
/// Returns true if the function calls the llvm.va_start intrinsic.
bool hasVAStart() const { return HasVAStart; }
void setHasVAStart(bool B) { HasVAStart = B; }
/// Returns true if the function is variadic and contains a musttail call.
bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }
/// Returns true if the function contains a tail call.
bool hasTailCall() const { return HasTailCall; }
void setHasTailCall() { HasTailCall = true; }
/// Return the maximum size of a call frame that must be
/// allocated for an outgoing function call. This is only available if
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and
/// then only during or after prolog/epilog code insertion.
///
unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are not pointed to by LLVM IR
/// values. This returns an index with a negative value.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable,
bool isAliased = false);
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset);
/// Returns true if the specified index corresponds to a fixed stack object.
bool isFixedObjectIndex(int ObjectIdx) const {
return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
}
/// Returns true if the specified index corresponds
/// to an object that might be pointed to by an LLVM IR value.
bool isAliasedObjectIndex(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].isAliased;
}
/// isImmutableObjectIndex - Returns true if the specified index corresponds
/// to an immutable object.
bool isImmutableObjectIndex(int ObjectIdx) const {
// Tail calling functions can clobber their function arguments.
if (HasTailCall)
return false;
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].isImmutable;
}
/// Returns true if the specified index corresponds to a spill slot.
bool isSpillSlotObjectIndex(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
}
/// Returns true if the specified index corresponds to a dead object.
bool isDeadObjectIndex(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
}
/// Returns true if the specified index corresponds to a variable sized
/// object.
bool isVariableSizedObjectIndex(int ObjectIdx) const {
assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
return Objects[ObjectIdx + NumFixedObjects].Size == 0;
}
/// Create a new statically sized stack object, returning
/// a nonnegative identifier to represent it.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
const AllocaInst *Alloca = nullptr);
/// Create a new statically sized stack object that represents a spill slot,
/// returning a nonnegative identifier to represent it.
int CreateSpillStackObject(uint64_t Size, unsigned Alignment);
/// Remove or mark dead a statically sized stack object.
void RemoveStackObject(int ObjectIdx) {
// Mark it dead.
Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
}
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be called whenever a variable sized object is
/// created, whether or not the index returned is actually used.
int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca);
/// Returns a reference to the callee saved info vector for the current
/// function.
const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
return CSInfo;
}
/// Used by prolog/epilog inserter to set the function's callee saved
/// information.
void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
CSInfo = CSI;
}
/// Has the callee saved info been calculated yet?
bool isCalleeSavedInfoValid() const { return CSIValid; }
void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
MachineBasicBlock *getSavePoint() const { return Save; }
void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
MachineBasicBlock *getRestorePoint() const { return Restore; }
void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }
/// Return a set of physical registers that are pristine.
///
/// Pristine registers hold a value that is useless to the current function,
/// but that must be preserved - they are callee saved registers that are not
/// saved.
///
/// Before the PrologueEpilogueInserter has placed the CSR spill code, this
/// method always returns an empty set.
BitVector getPristineRegs(const MachineFunction &MF) const;
/// Used by the MachineFunction printer to print information about
/// stack objects. Implemented in MachineFunction.cpp.
void print(const MachineFunction &MF, raw_ostream &OS) const;
/// dump - Print the function to stderr.
void dump(const MachineFunction &MF) const;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineFunctionPass.h | //===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineFunctionPass class. MachineFunctionPass's are
// just FunctionPass's, except they operate on machine code as part of a code
// generator. Because they operate on machine code, not the LLVM
// representation, MachineFunctionPass's are not allowed to modify the LLVM
// representation. Due to this limitation, the MachineFunctionPass class takes
// care of declaring that no LLVM passes are invalidated.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
#include "llvm/Pass.h"
namespace llvm {
class MachineFunction;
/// MachineFunctionPass - This class adapts the FunctionPass interface to
/// allow convenient creation of passes that operate on the MachineFunction
/// representation. Instead of overriding runOnFunction, subclasses
/// override runOnMachineFunction.
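///
/// A skeletal subclass (illustrative sketch; MyMachinePass is a
/// hypothetical name):
/// \code
///   struct MyMachinePass : public MachineFunctionPass {
///     static char ID;
///     MyMachinePass() : MachineFunctionPass(ID) {}
///     bool runOnMachineFunction(MachineFunction &MF) override {
///       return false; // return true if MF was modified
///     }
///   };
/// \endcode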
class MachineFunctionPass : public FunctionPass {
protected:
explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
/// runOnMachineFunction - This method must be overridden to perform the
/// desired machine code transformation or analysis.
///
virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
/// getAnalysisUsage - Subclasses that override getAnalysisUsage
/// must call this.
///
/// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
/// the pass does not modify the MachineBasicBlock CFG.
///
void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
/// createPrinterPass - Get a machine function printer pass.
Pass *createPrinterPass(raw_ostream &O,
const std::string &Banner) const override;
bool runOnFunction(Function &F) override;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveIntervalUnion.h | //===-- LiveIntervalUnion.h - Live interval union data struct --*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LiveIntervalUnion is a union of live segments across multiple live virtual
// registers. This may be used during coalescing to represent a congruence
// class, or during register allocation to model liveness of a physical
// register.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
#define LLVM_CODEGEN_LIVEINTERVALUNION_H
#include "llvm/ADT/IntervalMap.h"
#include "llvm/CodeGen/LiveInterval.h"
namespace llvm {
class TargetRegisterInfo;
#ifndef NDEBUG
// forward declaration
template <unsigned Element> class SparseBitVector;
typedef SparseBitVector<128> LiveVirtRegBitSet;
#endif
/// Compare a live virtual register segment to a LiveIntervalUnion segment.
inline bool
overlap(const LiveInterval::Segment &VRSeg,
const IntervalMap<SlotIndex, LiveInterval*>::const_iterator &LUSeg) {
return VRSeg.start < LUSeg.stop() && LUSeg.start() < VRSeg.end;
}
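// E.g., a vreg segment [16,32) and a union segment [24,40) overlap:
// 16 < 40 && 24 < 32. Half-open intervals that merely touch do not.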
/// Union of live intervals that are strong candidates for coalescing into a
/// single register (either physical or virtual depending on the context). We
/// expect the constituent live intervals to be disjoint, although we may
/// eventually make exceptions to handle value-based interference.
class LiveIntervalUnion {
// A set of live virtual register segments that supports fast insertion,
// intersection, and removal.
// Mapping SlotIndex intervals to virtual register numbers.
typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
public:
// SegmentIter can advance to the next segment ordered by starting position
// which may belong to a different live virtual register. We also must be able
// to reach the current segment's containing virtual register.
typedef LiveSegments::iterator SegmentIter;
// LiveIntervalUnions share an external allocator.
typedef LiveSegments::Allocator Allocator;
class Query;
private:
unsigned Tag; // unique tag for current contents.
LiveSegments Segments; // union of virtual reg segments
public:
explicit LiveIntervalUnion(Allocator &a) : Tag(0), Segments(a) {}
// Iterate over all segments in the union of live virtual registers ordered
// by their starting position.
SegmentIter begin() { return Segments.begin(); }
SegmentIter end() { return Segments.end(); }
SegmentIter find(SlotIndex x) { return Segments.find(x); }
bool empty() const { return Segments.empty(); }
SlotIndex startIndex() const { return Segments.start(); }
// Provide public access to the underlying map to allow overlap iteration.
typedef LiveSegments Map;
const Map &getMap() { return Segments; }
/// getTag - Return an opaque tag representing the current state of the union.
unsigned getTag() const { return Tag; }
/// changedSince - Return true if the union has changed since getTag returned
/// tag.
bool changedSince(unsigned tag) const { return tag != Tag; }
// Add a live virtual register to this union and merge its segments.
void unify(LiveInterval &VirtReg, const LiveRange &Range);
void unify(LiveInterval &VirtReg) {
unify(VirtReg, VirtReg);
}
// Remove a live virtual register's segments from this union.
void extract(LiveInterval &VirtReg, const LiveRange &Range);
void extract(LiveInterval &VirtReg) {
extract(VirtReg, VirtReg);
}
// Remove all inserted virtual registers.
void clear() { Segments.clear(); ++Tag; }
// Print union, using TRI to translate register names
void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
#ifndef NDEBUG
// Verify the live intervals in this union and add them to the visited set.
void verify(LiveVirtRegBitSet& VisitedVRegs);
#endif
/// Query interferences between a single live virtual register and a live
/// interval union.
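///
/// A minimal sketch of the intended use (illustrative; UTag, VirtReg, and
/// LIU are assumed to be in scope):
/// \code
///   LiveIntervalUnion::Query Q;
///   Q.init(UTag, &VirtReg, &LIU);
///   if (Q.checkInterference())
///     ...; // some already-assigned vreg overlaps VirtReg
/// \endcode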
class Query {
LiveIntervalUnion *LiveUnion;
LiveInterval *VirtReg;
LiveInterval::iterator VirtRegI; // current position in VirtReg
SegmentIter LiveUnionI; // current position in LiveUnion
SmallVector<LiveInterval*,4> InterferingVRegs;
bool CheckedFirstInterference;
bool SeenAllInterferences;
bool SeenUnspillableVReg;
unsigned Tag, UserTag;
public:
Query(): LiveUnion(), VirtReg(), Tag(0), UserTag(0) {}
Query(LiveInterval *VReg, LiveIntervalUnion *LIU):
LiveUnion(LIU), VirtReg(VReg), CheckedFirstInterference(false),
SeenAllInterferences(false), SeenUnspillableVReg(false)
{}
void clear() {
LiveUnion = nullptr;
VirtReg = nullptr;
InterferingVRegs.clear();
CheckedFirstInterference = false;
SeenAllInterferences = false;
SeenUnspillableVReg = false;
Tag = 0;
UserTag = 0;
}
void init(unsigned UTag, LiveInterval *VReg, LiveIntervalUnion *LIU) {
assert(VReg && LIU && "Invalid arguments");
if (UserTag == UTag && VirtReg == VReg &&
LiveUnion == LIU && !LIU->changedSince(Tag)) {
// Retain cached results, e.g. firstInterference.
return;
}
clear();
LiveUnion = LIU;
VirtReg = VReg;
Tag = LIU->getTag();
UserTag = UTag;
}
LiveInterval &virtReg() const {
assert(VirtReg && "uninitialized");
return *VirtReg;
}
// Does this live virtual register interfere with the union?
bool checkInterference() { return collectInterferingVRegs(1); }
// Count the virtual registers in this union that interfere with this
// query's live virtual register, up to maxInterferingRegs.
unsigned collectInterferingVRegs(unsigned MaxInterferingRegs = UINT_MAX);
// Was this virtual register visited during collectInterferingVRegs?
bool isSeenInterference(LiveInterval *VReg) const;
// Did collectInterferingVRegs collect all interferences?
bool seenAllInterferences() const { return SeenAllInterferences; }
// Did collectInterferingVRegs encounter an unspillable vreg?
bool seenUnspillableVReg() const { return SeenUnspillableVReg; }
// Vector generated by collectInterferingVRegs.
const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
return InterferingVRegs;
}
private:
Query(const Query&) = delete;
void operator=(const Query&) = delete;
};
// Array of LiveIntervalUnions.
class Array {
unsigned Size;
LiveIntervalUnion *LIUs;
public:
Array() : Size(0), LIUs(nullptr) {}
~Array() { clear(); }
// Initialize the array to have Size entries.
// Reuse an existing allocation if the size matches.
void init(LiveIntervalUnion::Allocator&, unsigned Size);
unsigned size() const { return Size; }
void clear();
LiveIntervalUnion& operator[](unsigned idx) {
assert(idx < Size && "idx out of bounds");
return LIUs[idx];
}
const LiveIntervalUnion& operator[](unsigned Idx) const {
assert(Idx < Size && "Idx out of bounds");
return LIUs[Idx];
}
};
};
} // end namespace llvm
#endif // !defined(LLVM_CODEGEN_LIVEINTERVALUNION_H)
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveRegMatrix.h | //===-- LiveRegMatrix.h - Track register interference ---------*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The LiveRegMatrix analysis pass keeps track of virtual register interference
// along two dimensions: Slot indexes and register units. The matrix is used by
// register allocators to ensure that no interfering virtual registers get
// assigned to overlapping physical registers.
//
// Register units are defined in MCRegisterInfo.h, they represent the smallest
// unit of interference when dealing with overlapping physical registers. The
// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
// a virtual register is assigned to a physical register, the live range for
// the virtual register is inserted into the LiveIntervalUnion for each regunit
// in the physreg.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
#define LLVM_CODEGEN_LIVEREGMATRIX_H
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
class LiveInterval;
class LiveIntervals;
class MachineRegisterInfo;
class TargetRegisterInfo;
class VirtRegMap;
class LiveRegMatrix : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
LiveIntervals *LIS;
VirtRegMap *VRM;
// UserTag changes whenever virtual registers have been modified.
unsigned UserTag;
// The matrix is represented as a LiveIntervalUnion per register unit.
LiveIntervalUnion::Allocator LIUAlloc;
LiveIntervalUnion::Array Matrix;
// Cached queries per register unit.
std::unique_ptr<LiveIntervalUnion::Query[]> Queries;
// Cached register mask interference info.
unsigned RegMaskTag;
unsigned RegMaskVirtReg;
BitVector RegMaskUsable;
// MachineFunctionPass boilerplate.
void getAnalysisUsage(AnalysisUsage&) const override;
bool runOnMachineFunction(MachineFunction&) override;
void releaseMemory() override;
public:
static char ID;
LiveRegMatrix();
//===--------------------------------------------------------------------===//
// High-level interface.
//===--------------------------------------------------------------------===//
//
// Check for interference before assigning virtual registers to physical
// registers.
//
/// Invalidate cached interference queries after modifying virtual register
/// live ranges. Interference checks may return stale information unless
/// caches are invalidated.
void invalidateVirtRegs() { ++UserTag; }
enum InterferenceKind {
/// No interference, go ahead and assign.
IK_Free = 0,
/// Virtual register interference. There are interfering virtual registers
/// assigned to PhysReg or its aliases. This interference could be resolved
/// by unassigning those other virtual registers.
IK_VirtReg,
/// Register unit interference. A fixed live range is in the way, typically
/// argument registers for a call. This can't be resolved by unassigning
/// other virtual registers.
IK_RegUnit,
/// RegMask interference. The live range is crossing an instruction with a
/// regmask operand that doesn't preserve PhysReg. This typically means
/// VirtReg is live across a call, and PhysReg isn't call-preserved.
IK_RegMask
};
/// Check for interference before assigning VirtReg to PhysReg.
/// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
/// When there is more than one kind of interference, the InterferenceKind
/// with the highest enum value is returned.
InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
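// A typical allocator-side sketch (illustrative only; Matrix, VirtReg, and
// PhysReg are assumed to be in scope):
//   if (Matrix.checkInterference(VirtReg, PhysReg) == LiveRegMatrix::IK_Free)
//     Matrix.assign(VirtReg, PhysReg);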
/// Assign VirtReg to PhysReg.
/// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
/// update VirtRegMap. The live range is expected to be available in PhysReg.
void assign(LiveInterval &VirtReg, unsigned PhysReg);
/// Unassign VirtReg from its PhysReg.
/// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
/// the assignment and updates VirtRegMap accordingly.
void unassign(LiveInterval &VirtReg);
/// Returns true if the given \p PhysReg has any live intervals assigned.
bool isPhysRegUsed(unsigned PhysReg) const;
//===--------------------------------------------------------------------===//
// Low-level interface.
//===--------------------------------------------------------------------===//
//
// Provide access to the underlying LiveIntervalUnions.
//
/// Check for regmask interference only.
/// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
/// If PhysReg is null, check if VirtReg crosses any regmask operands.
bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
/// Check for regunit interference only.
/// Return true if VirtReg overlaps a fixed assignment of one of PhysRegs's
/// register units.
bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
/// Query a line of the assigned virtual register matrix directly.
/// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
/// This returns a reference to an internal Query data structure that is only
/// valid until the next query() call.
LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned RegUnit);
/// Directly access the live interval unions per regunit.
/// This returns an array indexed by the regunit number.
LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
};
} // end namespace llvm
#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/Analysis.h | //===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
namespace llvm {
class GlobalValue;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
class SDNode;
class SDValue;
class SelectionDAG;
struct EVT;
/// \brief Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e., the number of elements in memory that
/// precede the one being sought. This is unrelated to the number of bytes.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer in the indices list to the current
/// index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index in \p Ty of the member
/// identified by the indices list.
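///
/// For example, in the aggregate {i32, {i32, i32}, i32}, the member
/// identified by indices {1, 1} has linearized index 2: two scalar elements
/// precede it in memory order.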
unsigned ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex = 0);
inline unsigned ComputeLinearIndex(Type *Ty,
ArrayRef<unsigned> Indices,
unsigned CurIndex = 0) {
return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets = nullptr,
uint64_t StartingOffset = 0);
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
const TargetLowering &TLI);
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
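/// For example, FCmpInst::FCMP_OLT normally maps to ISD::SETOLT, modulo the
/// global math flags mentioned above.
///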
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);
/// Test whether, given that the input instruction is in the tail call
/// position, the return type or any attributes of the function will inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F,
const Instruction *I,
const ReturnInst *Ret,
const TargetLoweringBase &TLI);
// True if GV can be left out of the object symbol table. This is the case
// for linkonce_odr values whose address is not significant. While legal, it is
// not normally profitable to omit them from the .o symbol table. Using this
// analysis makes sense when the information can be passed down to the linker
// or we are in LTO.
bool canBeOmittedFromSymbolTable(const GlobalValue *GV);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/FunctionLoweringInfo.h | //===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
namespace llvm {
class AllocaInst;
class BasicBlock;
class BranchProbabilityInfo;
class CallInst;
class Function;
class GlobalVariable;
class Instruction;
class MachineInstr;
class MachineBasicBlock;
class MachineFunction;
class MachineModuleInfo;
class MachineRegisterInfo;
class SelectionDAG;
class MVT;
class TargetLowering;
class Value;
//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
///
class FunctionLoweringInfo {
public:
const Function *Fn;
MachineFunction *MF;
const TargetLowering *TLI;
MachineRegisterInfo *RegInfo;
BranchProbabilityInfo *BPI;
/// CanLowerReturn - true iff the function's return value can be lowered to
/// registers.
bool CanLowerReturn;
/// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
/// allocated to hold a pointer to the hidden sret parameter.
unsigned DemoteRegister;
/// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
/// ValueMap - Since we emit code for the function a basic block at a time,
/// we must remember which virtual registers hold the values for
/// cross-basic-block values.
DenseMap<const Value*, unsigned> ValueMap;
// Keep track of frame indices allocated for statepoints as they could be used
// across basic block boundaries.
// Key of the map is statepoint instruction, value is a map from spilled
// llvm Value to the optional stack slot index.
// If optional is unspecified it means that we have visited this value
// but didn't spill it.
typedef DenseMap<const Value*, Optional<int>> StatepointSpilledValueMapTy;
DenseMap<const Instruction*, StatepointSpilledValueMapTy>
StatepointRelocatedValues;
/// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
/// the entry block. This allows the allocas to be efficiently referenced
/// anywhere in the function.
DenseMap<const AllocaInst*, int> StaticAllocaMap;
/// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
DenseMap<const Argument*, int> ByValArgFrameIndexMap;
/// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
/// function arguments that are inserted after scheduling is completed.
SmallVector<MachineInstr*, 8> ArgDbgValues;
/// RegFixups - Registers which need to be replaced after isel is done.
DenseMap<unsigned, unsigned> RegFixups;
/// StatepointStackSlots - A list of temporary stack slots (frame indices)
/// used to spill values at a statepoint. We store them here to enable
/// reuse of the same stack slots across different statepoints in different
/// basic blocks.
SmallVector<unsigned, 50> StatepointStackSlots;
/// MBB - The current block.
MachineBasicBlock *MBB;
/// InsertPt - The current insert position inside the current block.
MachineBasicBlock::iterator InsertPt;
#ifndef NDEBUG
SmallPtrSet<const Instruction *, 8> CatchInfoLost;
SmallPtrSet<const Instruction *, 8> CatchInfoFound;
#endif
struct LiveOutInfo {
unsigned NumSignBits : 31;
bool IsValid : 1;
APInt KnownOne, KnownZero;
LiveOutInfo() : NumSignBits(0), IsValid(true), KnownOne(1, 0),
KnownZero(1, 0) {}
};
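// Example (illustrative): for a 32-bit value produced by zero-extending an
// 8-bit quantity, the top 24 bits are known zero, so a LiveOutInfo could be
// populated as:
//   APInt KnownZero = APInt::getHighBitsSet(32, 24); // bits 8..31 known 0
//   APInt KnownOne  = APInt(32, 0);                  // no bits known 1
//   unsigned NumSignBits = 24; // at least the 24 known-zero top bits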
/// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
/// for a value.
DenseMap<const Value *, ISD::NodeType> PreferredExtendType;
/// VisitedBBs - The set of basic blocks visited thus far by instruction
/// selection.
SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
/// PHINodesToUpdate - A list of phi instructions whose operand list will
/// be updated after processing the current basic block.
/// TODO: This isn't per-function state, it's per-basic-block state. But
/// there's no other convenient place for it to live right now.
std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
unsigned OrigNumPHINodesToUpdate;
/// If the current MBB is a landing pad, the exception pointer and exception
/// selector registers are copied into these virtual registers by
/// SelectionDAGISel::PrepareEHLandingPad().
unsigned ExceptionPointerVirtReg, ExceptionSelectorVirtReg;
/// set - Initialize this FunctionLoweringInfo with the given Function
/// and its associated MachineFunction.
///
void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG);
/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void clear();
/// isExportedInst - Return true if the specified value is an instruction
/// exported from its block.
bool isExportedInst(const Value *V) {
return ValueMap.count(V);
}
unsigned CreateReg(MVT VT);
unsigned CreateRegs(Type *Ty);
unsigned InitializeRegForValue(const Value *V) {
unsigned &R = ValueMap[V];
assert(R == 0 && "Already initialized this value register!");
return R = CreateRegs(V->getType());
}
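// Illustrative sketch of typical use during selection (FuncInfo names a
// FunctionLoweringInfo instance, V a cross-block value; both hypothetical):
//   unsigned Reg = FuncInfo.isExportedInst(V)
//                      ? FuncInfo.ValueMap[V]
//                      : FuncInfo.InitializeRegForValue(V);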
/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid.
const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
if (!LiveOutRegInfo.inBounds(Reg))
return nullptr;
const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
if (!LOI->IsValid)
return nullptr;
return LOI;
}
/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth);
/// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
const APInt &KnownZero, const APInt &KnownOne) {
// Only install this information if it tells us something.
if (NumSignBits == 1 && KnownZero == 0 && KnownOne == 0)
return;
LiveOutRegInfo.grow(Reg);
LiveOutInfo &LOI = LiveOutRegInfo[Reg];
LOI.NumSignBits = NumSignBits;
LOI.KnownOne = KnownOne;
LOI.KnownZero = KnownZero;
}
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void ComputePHILiveOutRegInfo(const PHINode*);
/// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
/// called when a block is visited before all of its predecessors.
void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
// PHIs with no uses have no ValueMap entry.
DenseMap<const Value*, unsigned>::const_iterator It = ValueMap.find(PN);
if (It == ValueMap.end())
return;
unsigned Reg = It->second;
if (Reg == 0)
return;
LiveOutRegInfo.grow(Reg);
LiveOutRegInfo[Reg].IsValid = false;
}
/// setArgumentFrameIndex - Record frame index for the byval
/// argument.
void setArgumentFrameIndex(const Argument *A, int FI);
/// getArgumentFrameIndex - Get frame index for the byval argument.
int getArgumentFrameIndex(const Argument *A);
private:
void addSEHHandlersForLPads(ArrayRef<const LandingPadInst *> LPads);
/// LiveOutRegInfo - Information about live out vregs.
IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
};
/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI);
/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add them to the specified machine module info.
void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
MachineBasicBlock *MBB);
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineTraceMetrics.h | //===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the MachineTraceMetrics analysis pass
// that estimates CPU resource usage and critical data dependency paths through
// preferred traces. This is useful for super-scalar CPUs where execution speed
// can be limited both by data dependencies and by limited execution resources.
//
// Out-of-order CPUs will often be executing instructions from multiple basic
// blocks at the same time. This makes it difficult to estimate the resource
// usage accurately in a single basic block. Resources can be estimated better
// by looking at a trace through the current basic block.
//
// For every block, the MachineTraceMetrics pass will pick a preferred trace
// that passes through the block. The trace is chosen based on loop structure,
// branch probabilities, and resource usage. The intention is to pick likely
// traces that would be the most affected by code transformations.
//
// It is expensive to compute a full arbitrary trace for every block, so to
// save some computations, traces are chosen to be convergent. This means that
// if the traces through basic blocks A and B ever cross when moving away from
// A and B, they never diverge again. This applies in both directions - if the
// traces meet above A and B, they won't diverge when going further back.
//
// Traces tend to align with loops. The trace through a block in an inner loop
// will begin at the loop entry block and end at a back edge. If there are
// nested loops, the trace may begin and end at those instead.
//
// For each trace, we compute the critical path length, which is the number of
// cycles required to execute the trace when execution is limited by data
// dependencies only. We also compute the resource height, which is the number
// of cycles required to execute all instructions in the trace when ignoring
// data dependencies.
//
// Every instruction in the current block has a slack - the number of cycles
// by which execution of the instruction can be delayed without extending the
// critical path.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINETRACEMETRICS_H
#define LLVM_CODEGEN_MACHINETRACEMETRICS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetSchedule.h"
namespace llvm {
class InstrItineraryData;
class MachineBasicBlock;
class MachineInstr;
class MachineLoop;
class MachineLoopInfo;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterInfo;
class raw_ostream;
class MachineTraceMetrics : public MachineFunctionPass {
const MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
const MachineLoopInfo *Loops;
TargetSchedModel SchedModel;
public:
class Ensemble;
class Trace;
static char ID;
MachineTraceMetrics();
void getAnalysisUsage(AnalysisUsage&) const override;
bool runOnMachineFunction(MachineFunction&) override;
void releaseMemory() override;
void verifyAnalysis() const override;
friend class Ensemble;
friend class Trace;
/// Per-basic block information that doesn't depend on the trace through the
/// block.
struct FixedBlockInfo {
/// The number of non-trivial instructions in the block.
/// Doesn't count PHI and COPY instructions that are likely to be removed.
unsigned InstrCount;
/// True when the block contains calls.
bool HasCalls;
FixedBlockInfo() : InstrCount(~0u), HasCalls(false) {}
/// Returns true when resource information for this block has been computed.
bool hasResources() const { return InstrCount != ~0u; }
/// Invalidate resource information.
void invalidate() { InstrCount = ~0u; }
};
/// Get the fixed resource information about MBB. Compute it on demand.
const FixedBlockInfo *getResources(const MachineBasicBlock*);
/// Get the scaled number of cycles used per processor resource in MBB.
/// This is an array with SchedModel.getNumProcResourceKinds() entries.
/// The getResources() function above must have been called first.
///
/// These numbers have already been scaled by SchedModel.getResourceFactor().
ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
/// A virtual register or regunit required by a basic block or its trace
/// successors.
struct LiveInReg {
/// The virtual register required, or a register unit.
unsigned Reg;
/// For virtual registers: Minimum height of the defining instruction.
/// For regunits: Height of the highest user in the trace.
unsigned Height;
LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
};
/// Per-basic block information that relates to a specific trace through the
/// block. Because traces are convergent, only one of these is required per
/// block in a trace ensemble.
struct TraceBlockInfo {
/// Trace predecessor, or NULL for the first block in the trace.
/// Valid when hasValidDepth().
const MachineBasicBlock *Pred;
/// Trace successor, or NULL for the last block in the trace.
/// Valid when hasValidHeight().
const MachineBasicBlock *Succ;
/// The block number of the head of the trace. (When hasValidDepth()).
unsigned Head;
/// The block number of the tail of the trace. (When hasValidHeight()).
unsigned Tail;
/// Accumulated number of instructions in the trace above this block.
/// Does not include instructions in this block.
unsigned InstrDepth;
/// Accumulated number of instructions in the trace below this block.
/// Includes instructions in this block.
unsigned InstrHeight;
TraceBlockInfo() :
Pred(nullptr), Succ(nullptr),
InstrDepth(~0u), InstrHeight(~0u),
HasValidInstrDepths(false), HasValidInstrHeights(false) {}
/// Returns true if the depth resources have been computed from the trace
/// above this block.
bool hasValidDepth() const { return InstrDepth != ~0u; }
/// Returns true if the height resources have been computed from the trace
/// below this block.
bool hasValidHeight() const { return InstrHeight != ~0u; }
/// Invalidate depth resources when some block above this one has changed.
void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
/// Invalidate height resources when a block below this one has changed.
void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
/// Assuming that this is a dominator of TBI, determine if it contains
/// useful instruction depths. A dominating block can be above the current
/// trace head, and any dependencies from such a far away dominator are not
/// expected to affect the critical path.
///
/// Also returns true when TBI == this.
bool isUsefulDominator(const TraceBlockInfo &TBI) const {
// The trace for TBI may not even be calculated yet.
if (!hasValidDepth() || !TBI.hasValidDepth())
return false;
// Instruction depths are only comparable if the traces share a head.
if (Head != TBI.Head)
return false;
// It is almost always the case that TBI belongs to the same trace as
// this block, but in rare convoluted cases involving irreducible control
// flow, a dominator may share a trace head without actually being on the
// same trace as TBI. This is not a big problem as long as it doesn't
// increase the instruction depth.
return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
}
// Data-dependency-related information. Per-instruction depth and height
// are computed from data dependencies in the current trace, using
// itinerary data.
/// Instruction depths have been computed. This implies hasValidDepth().
bool HasValidInstrDepths;
/// Instruction heights have been computed. This implies hasValidHeight().
bool HasValidInstrHeights;
/// Critical path length. This is the number of cycles in the longest data
/// dependency chain through the trace. This is only valid when both
/// HasValidInstrDepths and HasValidInstrHeights are set.
unsigned CriticalPath;
/// Live-in registers. These registers are defined above the current block
/// and used by this block or a block below it.
/// This does not include PHI uses in the current block, but it does
/// include PHI uses in deeper blocks.
SmallVector<LiveInReg, 4> LiveIns;
void print(raw_ostream&) const;
};
/// InstrCycles represents the cycle height and depth of an instruction in a
/// trace.
struct InstrCycles {
/// Earliest issue cycle as determined by data dependencies and instruction
/// latencies from the beginning of the trace. Data dependencies from
/// before the trace are not included.
unsigned Depth;
/// Minimum number of cycles from when this instruction is issued to the end
/// of the trace, as determined by data dependencies and instruction
/// latencies.
unsigned Height;
};
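// The slack of an instruction in the trace center block relates these
// quantities (illustrative numbers; see Trace::getInstrSlack below):
//   Slack = CriticalPath - (Depth + Height)
//   e.g. Depth = 4, Height = 3, CriticalPath = 10  =>  Slack = 3 cycles.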
/// A trace represents a plausible sequence of executed basic blocks that
/// passes through the current basic block once. The Trace class serves as a
/// handle to internal cached data structures.
class Trace {
Ensemble &TE;
TraceBlockInfo &TBI;
unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
public:
explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
void print(raw_ostream&) const;
/// Compute the total number of instructions in the trace.
unsigned getInstrCount() const {
return TBI.InstrDepth + TBI.InstrHeight;
}
/// Return the resource depth of the top/bottom of the trace center block.
/// This is the number of cycles required to execute all instructions from
/// the trace head to the trace center block. The resource depth only
/// considers execution resources, it ignores data dependencies.
/// When Bottom is set, instructions in the trace center block are included.
unsigned getResourceDepth(bool Bottom) const;
/// Return the resource length of the trace. This is the number of cycles
/// required to execute the instructions in the trace if they were all
/// independent, exposing the maximum instruction-level parallelism.
///
/// Any blocks in Extrablocks are included as if they were part of the
/// trace. Likewise, extra resources required by the specified scheduling
/// classes are included. For the caller to account for extra machine
/// instructions, it must first resolve each instruction's scheduling class.
unsigned getResourceLength(
ArrayRef<const MachineBasicBlock *> Extrablocks = None,
ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None,
ArrayRef<const MCSchedClassDesc *> RemoveInstrs = None) const;
/// Return the length of the (data dependency) critical path through the
/// trace.
unsigned getCriticalPath() const { return TBI.CriticalPath; }
/// Return the depth and height of MI. The depth is only valid for
/// instructions in or above the trace center block. The height is only
/// valid for instructions in or below the trace center block.
InstrCycles getInstrCycles(const MachineInstr *MI) const {
return TE.Cycles.lookup(MI);
}
/// Return the slack of MI. This is the number of cycles MI can be delayed
/// before the critical path becomes longer.
/// MI must be an instruction in the trace center block.
unsigned getInstrSlack(const MachineInstr *MI) const;
/// Return the Depth of a PHI instruction in a trace center block successor.
/// The PHI does not have to be part of the trace.
unsigned getPHIDepth(const MachineInstr *PHI) const;
/// A dependence is useful if the basic block of the defining instruction
/// is part of the trace of the user instruction. It is assumed that DefMI
/// dominates UseMI (see also isUsefulDominator).
bool isDepInTrace(const MachineInstr *DefMI,
const MachineInstr *UseMI) const;
};
/// A trace ensemble is a collection of traces selected using the same
/// strategy, for example 'minimum resource height'. There is one trace for
/// every block in the function.
class Ensemble {
SmallVector<TraceBlockInfo, 4> BlockInfo;
DenseMap<const MachineInstr*, InstrCycles> Cycles;
SmallVector<unsigned, 0> ProcResourceDepths;
SmallVector<unsigned, 0> ProcResourceHeights;
friend class Trace;
void computeTrace(const MachineBasicBlock*);
void computeDepthResources(const MachineBasicBlock*);
void computeHeightResources(const MachineBasicBlock*);
unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
void computeInstrDepths(const MachineBasicBlock*);
void computeInstrHeights(const MachineBasicBlock*);
void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
ArrayRef<const MachineBasicBlock*> Trace);
protected:
MachineTraceMetrics &MTM;
virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
explicit Ensemble(MachineTraceMetrics*);
const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
public:
virtual ~Ensemble();
virtual const char *getName() const =0;
void print(raw_ostream&) const;
void invalidate(const MachineBasicBlock *MBB);
void verify() const;
/// Get the trace that passes through MBB.
/// The trace is computed on demand.
Trace getTrace(const MachineBasicBlock *MBB);
};
/// Strategies for selecting traces.
enum Strategy {
/// Select the trace through a block that has the fewest instructions.
TS_MinInstrCount,
TS_NumStrategies
};
/// Get the trace ensemble representing the given trace selection strategy.
/// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
/// and valid for the lifetime of the analysis pass.
Ensemble *getEnsemble(Strategy);
/// Invalidate cached information about MBB. This must be called *before* MBB
/// is erased, or the CFG is otherwise changed.
///
/// This invalidates per-block information about resource usage for MBB only,
/// and it invalidates per-trace information for any trace that passes
/// through MBB.
///
/// Call Ensemble::getTrace() again to update any trace handles.
void invalidate(const MachineBasicBlock *MBB);
private:
// One entry per basic block, indexed by block number.
SmallVector<FixedBlockInfo, 4> BlockInfo;
// Cycles consumed on each processor resource per block.
// The number of processor resource kinds is constant for a given subtarget,
// but it is not known at compile time. The number of cycles consumed by
// block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
// where Kinds = SchedModel.getNumProcResourceKinds().
SmallVector<unsigned, 0> ProcResourceCycles;
// One ensemble per strategy.
Ensemble* Ensembles[TS_NumStrategies];
// Convert scaled resource usage to a cycle count that can be compared with
// latencies.
unsigned getCycles(unsigned Scaled) {
unsigned Factor = SchedModel.getLatencyFactor();
return (Scaled + Factor - 1) / Factor;
}
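// Worked example (illustrative): with a latency factor of 2, a scaled
// resource usage of 7 converts to ceil(7/2) cycles:
//   getCycles(7) == (7 + 2 - 1) / 2 == 4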
};
inline raw_ostream &operator<<(raw_ostream &OS,
const MachineTraceMetrics::Trace &Tr) {
Tr.print(OS);
return OS;
}
inline raw_ostream &operator<<(raw_ostream &OS,
const MachineTraceMetrics::Ensemble &En) {
En.print(OS);
return OS;
}
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/CalcSpillWeights.h | //===---------------- lib/CodeGen/CalcSpillWeights.h ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SlotIndexes.h"
namespace llvm {
class LiveInterval;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineLoopInfo;
/// \brief Normalize the spill weight of a live interval
///
/// The spill weight of a live interval is computed as:
///
/// (sum(use freq) + sum(def freq)) / (K + size)
///
/// @param UseDefFreq Expected number of executed use and def instructions
/// per function call. Derived from block frequencies.
/// @param Size Size of the live interval as returned by getSize()
/// @param NumInstr Number of instructions using this live interval
///
static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size,
unsigned NumInstr) {
// The constant 25 instructions is added to avoid depending too much on
// accidental SlotIndex gaps for small intervals. The effect is that small
// intervals have a spill weight that is mostly proportional to the number
// of uses, while large intervals get a spill weight that is closer to a use
// density.
return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
}
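// Worked example (illustrative): with UseDefFreq = 500 and an interval of
// Size = 100 slot-index units, the weight is 500 / (100 + 25*InstrDist);
// if SlotIndex::InstrDist were 16, that is 500 / 500 = 1.0.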
/// \brief Calculate auxiliary information for a virtual register such as its
/// spill weight and allocation hint.
class VirtRegAuxInfo {
public:
typedef float (*NormalizingFn)(float, unsigned, unsigned);
private:
MachineFunction &MF;
LiveIntervals &LIS;
const MachineLoopInfo &Loops;
const MachineBlockFrequencyInfo &MBFI;
DenseMap<unsigned, float> Hint;
NormalizingFn normalize;
public:
VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
const MachineLoopInfo &loops,
const MachineBlockFrequencyInfo &mbfi,
NormalizingFn norm = normalizeSpillWeight)
: MF(mf), LIS(lis), Loops(loops), MBFI(mbfi), normalize(norm) {}
/// \brief (re)compute li's spill weight and allocation hint.
void calculateSpillWeightAndHint(LiveInterval &li);
};
/// \brief Compute spill weights and allocation hints for all virtual register
/// live intervals.
void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
const MachineLoopInfo &MLI,
const MachineBlockFrequencyInfo &MBFI,
VirtRegAuxInfo::NormalizingFn norm =
normalizeSpillWeight);
}
#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineOperand.h | //===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineOperand class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
class BlockAddress;
class ConstantFP;
class ConstantInt;
class GlobalValue;
class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class ModuleSlotTracker;
class TargetMachine;
class TargetRegisterInfo;
class hash_code;
class raw_ostream;
class MCSymbol;
/// MachineOperand class - Representation of each machine instruction operand.
///
/// This class isn't a POD type because it has a private constructor, but its
/// destructor must be trivial. Functions like MachineInstr::addOperand(),
/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
/// not having to call the MachineOperand destructor.
///
class MachineOperand {
public:
enum MachineOperandType : unsigned char {
MO_Register, ///< Register operand.
MO_Immediate, ///< Immediate operand
MO_CImmediate, ///< Immediate >64bit operand
MO_FPImmediate, ///< Floating-point immediate operand
MO_MachineBasicBlock, ///< MachineBasicBlock reference
MO_FrameIndex, ///< Abstract Stack Frame Index
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
MO_TargetIndex, ///< Target-dependent index+offset operand.
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress, ///< Address of a global value
MO_BlockAddress, ///< Address of a basic block
MO_RegisterMask, ///< Mask of preserved registers.
MO_RegisterLiveOut, ///< Mask of live-out registers.
MO_Metadata, ///< Metadata reference (for debug info)
MO_MCSymbol, ///< MCSymbol reference (for debug/eh info)
MO_CFIIndex ///< MCCFIInstruction index.
};
private:
/// OpKind - Specify what kind of operand this is. This discriminates the
/// union.
MachineOperandType OpKind : 8;
/// Subregister number for MO_Register. A value of 0 indicates the
/// MO_Register has no subReg.
///
/// For all other kinds of operands, this field holds target-specific flags.
unsigned SubReg_TargetFlags : 12;
/// TiedTo - Non-zero when this register operand is tied to another register
/// operand. The encoding of this field is described in the block comment
/// before MachineInstr::tieOperands().
unsigned char TiedTo : 4;
/// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
/// operands.
/// IsDef - True if this is a def, false if this is a use of the register.
///
bool IsDef : 1;
/// IsImp - True if this is an implicit def or use, false if it is explicit.
///
bool IsImp : 1;
/// IsKill - True if this instruction is the last use of the register on this
/// path through the function. This is only valid on uses of registers.
bool IsKill : 1;
/// IsDead - True if this register is never used by a subsequent instruction.
/// This is only valid on definitions of registers.
bool IsDead : 1;
/// IsUndef - True if this register operand reads an "undef" value, i.e. the
/// read value doesn't matter. This flag can be set on both use and def
/// operands. On a sub-register def operand, it refers to the part of the
/// register that isn't written. On a full-register def operand, it is a
/// noop. See readsReg().
///
/// This is only valid on registers.
///
/// Note that an instruction may have multiple <undef> operands referring to
/// the same register. In that case, the instruction may depend on those
/// operands reading the same don't-care value. For example:
///
/// %vreg1<def> = XOR %vreg2<undef>, %vreg2<undef>
///
/// Any register can be used for %vreg2, and its value doesn't matter, but
/// the two operands must be the same register.
///
bool IsUndef : 1;
/// IsInternalRead - True if this operand reads a value that was defined
/// inside the same instruction or bundle. This flag can be set on both use
/// and def operands. On a sub-register def operand, it refers to the part
/// of the register that isn't written. On a full-register def operand, it
/// is a noop.
///
/// When this flag is set, the instruction bundle must contain at least one
/// other def of the register. If multiple instructions in the bundle define
/// the register, the meaning is target-defined.
bool IsInternalRead : 1;
/// IsEarlyClobber - True if this MO_Register 'def' operand is written to
/// by the MachineInstr before all input registers are read. This is used to
/// model the GCC inline asm '&' constraint modifier.
bool IsEarlyClobber : 1;
/// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
/// not a real instruction. Such uses should be ignored during codegen.
bool IsDebug : 1;
/// SmallContents - This really should be part of the Contents union, but
/// lives out here so we can get a better packed struct.
/// MO_Register: Register number.
/// OffsetedInfo: Low bits of offset.
union {
unsigned RegNo; // For MO_Register.
unsigned OffsetLo; // Matches Contents.OffsetedInfo.OffsetHi.
} SmallContents;
/// ParentMI - This is the instruction that this operand is embedded into.
/// This is valid for all operand types, when the operand is in an instr.
MachineInstr *ParentMI;
/// Contents union - This contains the payload for the various operand types.
union {
MachineBasicBlock *MBB; // For MO_MachineBasicBlock.
const ConstantFP *CFP; // For MO_FPImmediate.
const ConstantInt *CI; // For MO_CImmediate. Integers > 64bit.
int64_t ImmVal; // For MO_Immediate.
const uint32_t *RegMask; // For MO_RegisterMask and MO_RegisterLiveOut.
const MDNode *MD; // For MO_Metadata.
MCSymbol *Sym; // For MO_MCSymbol.
unsigned CFIIndex; // For MO_CFIIndex.
struct { // For MO_Register.
// Register number is in SmallContents.RegNo.
MachineOperand *Prev; // Access list for register. See MRI.
MachineOperand *Next;
} Reg;
/// OffsetedInfo - This struct contains the offset and an object identifier;
/// it represents the object together with an optional offset from it.
struct {
union {
int Index; // For MO_*Index - The index itself.
const char *SymbolName; // For MO_ExternalSymbol.
const GlobalValue *GV; // For MO_GlobalAddress.
const BlockAddress *BA; // For MO_BlockAddress.
} Val;
// Low bits of offset are in SmallContents.OffsetLo.
int OffsetHi; // An offset from the object, high 32 bits.
} OffsetedInfo;
} Contents;
explicit MachineOperand(MachineOperandType K)
: OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {}
public:
/// getType - Returns the MachineOperandType for this operand.
///
MachineOperandType getType() const { return (MachineOperandType)OpKind; }
unsigned getTargetFlags() const {
return isReg() ? 0 : SubReg_TargetFlags;
}
void setTargetFlags(unsigned F) {
assert(!isReg() && "Register operands can't have target flags");
SubReg_TargetFlags = F;
assert(SubReg_TargetFlags == F && "Target flags out of range");
}
void addTargetFlag(unsigned F) {
assert(!isReg() && "Register operands can't have target flags");
SubReg_TargetFlags |= F;
assert((SubReg_TargetFlags & F) && "Target flags out of range");
}
/// getParent - Return the instruction that this operand belongs to.
///
MachineInstr *getParent() { return ParentMI; }
const MachineInstr *getParent() const { return ParentMI; }
/// clearParent - Reset the parent pointer.
///
/// The MachineOperand copy constructor also copies ParentMI, expecting the
/// original to be deleted. If a MachineOperand is ever stored outside a
/// MachineInstr, the parent pointer must be cleared.
///
/// Never call clearParent() on an operand in a MachineInstr.
///
void clearParent() { ParentMI = nullptr; }
void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr) const;
void print(raw_ostream &os, ModuleSlotTracker &MST,
const TargetRegisterInfo *TRI = nullptr) const;
//===--------------------------------------------------------------------===//
// Accessors that tell you what kind of MachineOperand you're looking at.
//===--------------------------------------------------------------------===//
/// isReg - Tests if this is a MO_Register operand.
bool isReg() const { return OpKind == MO_Register; }
/// isImm - Tests if this is a MO_Immediate operand.
bool isImm() const { return OpKind == MO_Immediate; }
/// isCImm - Test if this is a MO_CImmediate operand.
bool isCImm() const { return OpKind == MO_CImmediate; }
/// isFPImm - Tests if this is a MO_FPImmediate operand.
bool isFPImm() const { return OpKind == MO_FPImmediate; }
/// isMBB - Tests if this is a MO_MachineBasicBlock operand.
bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
/// isFI - Tests if this is a MO_FrameIndex operand.
bool isFI() const { return OpKind == MO_FrameIndex; }
/// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
/// isTargetIndex - Tests if this is a MO_TargetIndex operand.
bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
/// isJTI - Tests if this is a MO_JumpTableIndex operand.
bool isJTI() const { return OpKind == MO_JumpTableIndex; }
/// isGlobal - Tests if this is a MO_GlobalAddress operand.
bool isGlobal() const { return OpKind == MO_GlobalAddress; }
/// isSymbol - Tests if this is a MO_ExternalSymbol operand.
bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
/// isBlockAddress - Tests if this is a MO_BlockAddress operand.
bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
/// isRegMask - Tests if this is a MO_RegisterMask operand.
bool isRegMask() const { return OpKind == MO_RegisterMask; }
/// isRegLiveOut - Tests if this is a MO_RegisterLiveOut operand.
bool isRegLiveOut() const { return OpKind == MO_RegisterLiveOut; }
/// isMetadata - Tests if this is a MO_Metadata operand.
bool isMetadata() const { return OpKind == MO_Metadata; }
bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
//===--------------------------------------------------------------------===//
// Accessors for Register Operands
//===--------------------------------------------------------------------===//
/// getReg - Returns the register number.
unsigned getReg() const {
assert(isReg() && "This is not a register operand!");
return SmallContents.RegNo;
}
unsigned getSubReg() const {
assert(isReg() && "Wrong MachineOperand accessor");
return SubReg_TargetFlags;
}
bool isUse() const {
assert(isReg() && "Wrong MachineOperand accessor");
return !IsDef;
}
bool isDef() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDef;
}
bool isImplicit() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsImp;
}
bool isDead() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDead;
}
bool isKill() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsKill;
}
bool isUndef() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsUndef;
}
bool isInternalRead() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsInternalRead;
}
bool isEarlyClobber() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsEarlyClobber;
}
bool isTied() const {
assert(isReg() && "Wrong MachineOperand accessor");
return TiedTo;
}
bool isDebug() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDebug;
}
/// readsReg - Returns true if this operand reads the previous value of its
/// register. A use operand with the <undef> flag set doesn't read its
/// register. A sub-register def implicitly reads the other parts of the
/// register being redefined unless the <undef> flag is set.
///
/// This refers to reading the register value from before the current
/// instruction or bundle. Internal bundle reads are not included.
bool readsReg() const {
assert(isReg() && "Wrong MachineOperand accessor");
return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
}
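// Illustrative cases (sketch, using pre-RA vreg asm notation):
//   %vreg1<def> = ...               // full def: no read of %vreg1
//   %vreg1:sub_16<def> = ...        // partial def: reads the other parts
//   %vreg1:sub_16<def,undef> = ...  // partial def of undef: no read
//   ... = USE %vreg1<undef>         // undef use: no read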
//===--------------------------------------------------------------------===//
// Mutators for Register Operands
//===--------------------------------------------------------------------===//
/// Change the register this operand corresponds to.
///
void setReg(unsigned Reg);
void setSubReg(unsigned subReg) {
assert(isReg() && "Wrong MachineOperand accessor");
SubReg_TargetFlags = subReg;
assert(SubReg_TargetFlags == subReg && "SubReg out of range");
}
/// substVirtReg - Substitute the current register with the virtual
/// subregister Reg:SubReg. Take any existing SubReg index into account,
/// using TargetRegisterInfo to compose the subreg indices if necessary.
/// Reg must be a virtual register, SubIdx can be 0.
///
void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
/// substPhysReg - Substitute the current register with the physical register
/// Reg, taking any existing SubReg into account. For instance,
/// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
///
void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
void setIsUse(bool Val = true) { setIsDef(!Val); }
void setIsDef(bool Val = true);
void setImplicit(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
IsImp = Val;
}
void setIsKill(bool Val = true) {
assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
assert((!Val || !isDebug()) && "Marking a debug operation as kill");
IsKill = Val;
}
void setIsDead(bool Val = true) {
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
IsDead = Val;
}
void setIsUndef(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
IsUndef = Val;
}
void setIsInternalRead(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
IsInternalRead = Val;
}
void setIsEarlyClobber(bool Val = true) {
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
IsEarlyClobber = Val;
}
void setIsDebug(bool Val = true) {
assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
IsDebug = Val;
}
//===--------------------------------------------------------------------===//
// Accessors for various operand types.
//===--------------------------------------------------------------------===//
int64_t getImm() const {
assert(isImm() && "Wrong MachineOperand accessor");
return Contents.ImmVal;
}
const ConstantInt *getCImm() const {
assert(isCImm() && "Wrong MachineOperand accessor");
return Contents.CI;
}
const ConstantFP *getFPImm() const {
assert(isFPImm() && "Wrong MachineOperand accessor");
return Contents.CFP;
}
MachineBasicBlock *getMBB() const {
assert(isMBB() && "Wrong MachineOperand accessor");
return Contents.MBB;
}
int getIndex() const {
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.Index;
}
const GlobalValue *getGlobal() const {
assert(isGlobal() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.GV;
}
const BlockAddress *getBlockAddress() const {
assert(isBlockAddress() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.BA;
}
MCSymbol *getMCSymbol() const {
assert(isMCSymbol() && "Wrong MachineOperand accessor");
return Contents.Sym;
}
unsigned getCFIIndex() const {
assert(isCFIIndex() && "Wrong MachineOperand accessor");
return Contents.CFIIndex;
}
/// Return the offset from the symbol in this operand. This always returns 0
/// for ExternalSymbol operands.
int64_t getOffset() const {
assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
isTargetIndex() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
SmallContents.OffsetLo;
}
const char *getSymbolName() const {
assert(isSymbol() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.SymbolName;
}
/// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
/// It is sometimes necessary to detach the register mask pointer from its
/// machine operand. This static method can be used for such detached bit
/// mask pointers.
static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
// See TargetRegisterInfo.h.
assert(PhysReg < (1u << 30) && "Not a physical register");
return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
}
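// Worked example (illustrative): PhysReg 37 tests bit (37 % 32) == 5 of
// word RegMask[37 / 32] == RegMask[1]; a set bit means "preserved":
//   uint32_t Mask[2] = {0xFFFFFFFFu, 0xFFFFFFDFu}; // bit 5 of word 1 clear
//   clobbersPhysReg(Mask, 37); // true: register 37 is clobbered
//   clobbersPhysReg(Mask, 36); // false: register 36 is preserved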
/// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
bool clobbersPhysReg(unsigned PhysReg) const {
return clobbersPhysReg(getRegMask(), PhysReg);
}
/// getRegMask - Returns a bit mask of registers preserved by this RegMask
/// operand.
const uint32_t *getRegMask() const {
assert(isRegMask() && "Wrong MachineOperand accessor");
return Contents.RegMask;
}
/// getRegLiveOut - Returns a bit mask of live-out registers.
const uint32_t *getRegLiveOut() const {
assert(isRegLiveOut() && "Wrong MachineOperand accessor");
return Contents.RegMask;
}
const MDNode *getMetadata() const {
assert(isMetadata() && "Wrong MachineOperand accessor");
return Contents.MD;
}
//===--------------------------------------------------------------------===//
// Mutators for various operand types.
//===--------------------------------------------------------------------===//
void setImm(int64_t immVal) {
assert(isImm() && "Wrong MachineOperand mutator");
Contents.ImmVal = immVal;
}
void setFPImm(const ConstantFP *CFP) {
assert(isFPImm() && "Wrong MachineOperand mutator");
Contents.CFP = CFP;
}
void setOffset(int64_t Offset) {
assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
isTargetIndex() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
SmallContents.OffsetLo = unsigned(Offset);
Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
}
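// Example (illustrative): setOffset(0x123456789LL) stores OffsetLo =
// 0x23456789 and OffsetHi = 0x1; getOffset() reassembles them as
// (int64_t(0x1) << 32) | 0x23456789.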
void setIndex(int Idx) {
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
Contents.OffsetedInfo.Val.Index = Idx;
}
void setMBB(MachineBasicBlock *MBB) {
assert(isMBB() && "Wrong MachineOperand accessor");
Contents.MBB = MBB;
}
//===--------------------------------------------------------------------===//
// Other methods.
//===--------------------------------------------------------------------===//
/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note: This method ignores isKill and isDead properties.
bool isIdenticalTo(const MachineOperand &Other) const;
/// \brief MachineOperand hash_value overload.
///
/// Note that this includes the same information in the hash that
/// isIdenticalTo uses for comparison. It is thus suited for use in hash
/// tables which use that function for equality comparisons only.
friend hash_code hash_value(const MachineOperand &MO);
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void ChangeToImmediate(int64_t ImmVal);
/// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
/// of the specified value. If an operand is known to be an FP immediate
/// already, the setFPImm method should be used.
void ChangeToFPImmediate(const ConstantFP *FPImm);
/// ChangeToES - Replace this operand with a new external symbol operand.
void ChangeToES(const char *SymName, unsigned char TargetFlags = 0);
/// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
void ChangeToMCSymbol(MCSymbol *Sym);
/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value. If an operand is known to be a register already,
/// the setReg method should be used.
void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
bool isKill = false, bool isDead = false,
bool isUndef = false, bool isDebug = false);
//===--------------------------------------------------------------------===//
// Construction methods.
//===--------------------------------------------------------------------===//
static MachineOperand CreateImm(int64_t Val) {
MachineOperand Op(MachineOperand::MO_Immediate);
Op.setImm(Val);
return Op;
}
static MachineOperand CreateCImm(const ConstantInt *CI) {
MachineOperand Op(MachineOperand::MO_CImmediate);
Op.Contents.CI = CI;
return Op;
}
static MachineOperand CreateFPImm(const ConstantFP *CFP) {
MachineOperand Op(MachineOperand::MO_FPImmediate);
Op.Contents.CFP = CFP;
return Op;
}
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
bool isKill = false, bool isDead = false,
bool isUndef = false,
bool isEarlyClobber = false,
unsigned SubReg = 0,
bool isDebug = false,
bool isInternalRead = false) {
assert(!(isDead && !isDef) && "Dead flag on non-def");
assert(!(isKill && isDef) && "Kill flag on def");
MachineOperand Op(MachineOperand::MO_Register);
Op.IsDef = isDef;
Op.IsImp = isImp;
Op.IsKill = isKill;
Op.IsDead = isDead;
Op.IsUndef = isUndef;
Op.IsInternalRead = isInternalRead;
Op.IsEarlyClobber = isEarlyClobber;
Op.TiedTo = 0;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
Op.Contents.Reg.Prev = nullptr;
Op.Contents.Reg.Next = nullptr;
Op.setSubReg(SubReg);
return Op;
}
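// Example (illustrative): creating a plain def of Reg and a killing use of
// the same register (Reg stands for any valid register number):
//   MachineOperand Def = MachineOperand::CreateReg(Reg, /*isDef=*/true);
//   MachineOperand Use = MachineOperand::CreateReg(Reg, /*isDef=*/false,
//                                                  /*isImp=*/false,
//                                                  /*isKill=*/true);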
static MachineOperand CreateMBB(MachineBasicBlock *MBB,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
Op.setMBB(MBB);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateFI(int Idx) {
MachineOperand Op(MachineOperand::MO_FrameIndex);
Op.setIndex(Idx);
return Op;
}
static MachineOperand CreateCPI(unsigned Idx, int Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
Op.setIndex(Idx);
Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_TargetIndex);
Op.setIndex(Idx);
Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateJTI(unsigned Idx,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_JumpTableIndex);
Op.setIndex(Idx);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_GlobalAddress);
Op.Contents.OffsetedInfo.Val.GV = GV;
Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateES(const char *SymName,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_ExternalSymbol);
Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
Op.setOffset(0); // Offset is always 0.
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_BlockAddress);
Op.Contents.OffsetedInfo.Val.BA = BA;
Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
/// CreateRegMask - Creates a register mask operand referencing Mask. The
/// operand does not take ownership of the memory referenced by Mask, it must
/// remain valid for the lifetime of the operand.
///
/// A RegMask operand represents a set of non-clobbered physical registers on
/// an instruction that clobbers many registers, typically a call. The bit
/// mask has a bit set for each physreg that is preserved by this
/// instruction, as described in the documentation for
/// TargetRegisterInfo::getCallPreservedMask().
///
/// Any physreg with a 0 bit in the mask is clobbered by the instruction.
///
static MachineOperand CreateRegMask(const uint32_t *Mask) {
assert(Mask && "Missing register mask");
MachineOperand Op(MachineOperand::MO_RegisterMask);
Op.Contents.RegMask = Mask;
return Op;
}
static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
assert(Mask && "Missing live-out register mask");
MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
Op.Contents.RegMask = Mask;
return Op;
}
static MachineOperand CreateMetadata(const MDNode *Meta) {
MachineOperand Op(MachineOperand::MO_Metadata);
Op.Contents.MD = Meta;
return Op;
}
static MachineOperand CreateMCSymbol(MCSymbol *Sym,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_MCSymbol);
Op.Contents.Sym = Sym;
Op.setOffset(0);
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateCFIIndex(unsigned CFIIndex) {
MachineOperand Op(MachineOperand::MO_CFIIndex);
Op.Contents.CFIIndex = CFIIndex;
return Op;
}
friend class MachineInstr;
friend class MachineRegisterInfo;
private:
void removeRegFromUses();
//===--------------------------------------------------------------------===//
// Methods for handling register use/def lists.
//===--------------------------------------------------------------------===//
/// isOnRegUseList - Return true if this operand is on a register use/def list
/// or false if not. This can only be called for register operands that are
/// part of a machine instruction.
bool isOnRegUseList() const {
assert(isReg() && "Can only add reg operand to use lists");
return Contents.Reg.Prev != nullptr;
}
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
MO.print(OS, nullptr);
return OS;
}
// See friend declaration above. This additional declaration is required in
// order to compile LLVM with IBM xlC compiler.
hash_code hash_value(const MachineOperand &MO);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachinePostDominators.h | //=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes interfaces to post dominance information for
// target-specific code.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
///
/// MachinePostDominatorTree - Analysis pass that wraps a DominatorTreeBase
/// and uses it to compute the post-dominator tree for machine basic blocks.
///
struct MachinePostDominatorTree : public MachineFunctionPass {
private:
DominatorTreeBase<MachineBasicBlock> *DT;
public:
static char ID;
MachinePostDominatorTree();
~MachinePostDominatorTree() override;
FunctionPass *createMachinePostDominatorTreePass();
const std::vector<MachineBasicBlock *> &getRoots() const {
return DT->getRoots();
}
MachineDomTreeNode *getRootNode() const {
return DT->getRootNode();
}
MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
return DT->getNode(BB);
}
MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
return DT->getNode(BB);
}
bool dominates(const MachineDomTreeNode *A,
const MachineDomTreeNode *B) const {
return DT->dominates(A, B);
}
bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
return DT->dominates(A, B);
}
bool properlyDominates(const MachineDomTreeNode *A,
const MachineDomTreeNode *B) const {
return DT->properlyDominates(A, B);
}
bool properlyDominates(const MachineBasicBlock *A,
const MachineBasicBlock *B) const {
return DT->properlyDominates(A, B);
}
MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
MachineBasicBlock *B) {
return DT->findNearestCommonDominator(A, B);
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
};
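// Illustrative sketch of typical use from another machine function pass that
// declared the dependency via AnalysisUsage::addRequired:
//   MachinePostDominatorTree &PDT = getAnalysis<MachinePostDominatorTree>();
//   if (PDT.dominates(A, B)) {
//     // A post-dominates B: every path from B to function exit goes through A.
//   }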
} //end of namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RegAllocPBQP.h | //===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class raw_ostream;
namespace PBQP {
namespace RegAlloc {
/// @brief Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }
/// \brief Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
private:
MatrixMetadata(const MatrixMetadata&);
void operator=(const MatrixMetadata&);
public:
MatrixMetadata(const Matrix& M)
: WorstRow(0), WorstCol(0),
UnsafeRows(new bool[M.getRows() - 1]()),
UnsafeCols(new bool[M.getCols() - 1]()) {
unsigned* ColCounts = new unsigned[M.getCols() - 1]();
for (unsigned i = 1; i < M.getRows(); ++i) {
unsigned RowCount = 0;
for (unsigned j = 1; j < M.getCols(); ++j) {
if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
++RowCount;
++ColCounts[j - 1];
UnsafeRows[i - 1] = true;
UnsafeCols[j - 1] = true;
}
}
WorstRow = std::max(WorstRow, RowCount);
}
unsigned WorstColCountForCurRow =
*std::max_element(ColCounts, ColCounts + M.getCols() - 1);
WorstCol = std::max(WorstCol, WorstColCountForCurRow);
delete[] ColCounts;
}
unsigned getWorstRow() const { return WorstRow; }
unsigned getWorstCol() const { return WorstCol; }
const bool* getUnsafeRows() const { return UnsafeRows.get(); }
const bool* getUnsafeCols() const { return UnsafeCols.get(); }
private:
unsigned WorstRow, WorstCol;
std::unique_ptr<bool[]> UnsafeRows;
std::unique_ptr<bool[]> UnsafeCols;
};
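// Worked example (illustrative): for a 3x3 cost matrix whose only infinite
// entry is at (1,2) - row and column 0 are the spill options and are skipped
// by the scan - the resulting metadata is UnsafeRows == {true, false},
// UnsafeCols == {false, true}, WorstRow == 1 and WorstCol == 1.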
/// \brief Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
friend hash_code hash_value(const AllowedRegVector &);
public:
AllowedRegVector() : NumOpts(0), Opts(nullptr) {}
AllowedRegVector(const std::vector<unsigned> &OptVec)
: NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
std::copy(OptVec.begin(), OptVec.end(), Opts.get());
}
AllowedRegVector(const AllowedRegVector &Other)
: NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
}
AllowedRegVector(AllowedRegVector &&Other)
: NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}
AllowedRegVector& operator=(const AllowedRegVector &Other) {
NumOpts = Other.NumOpts;
Opts.reset(new unsigned[NumOpts]);
std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
return *this;
}
AllowedRegVector& operator=(AllowedRegVector &&Other) {
NumOpts = std::move(Other.NumOpts);
Opts = std::move(Other.Opts);
return *this;
}
unsigned size() const { return NumOpts; }
unsigned operator[](size_t I) const { return Opts[I]; }
bool operator==(const AllowedRegVector &Other) const {
if (NumOpts != Other.NumOpts)
return false;
return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
}
bool operator!=(const AllowedRegVector &Other) const {
return !(*this == Other);
}
private:
unsigned NumOpts;
std::unique_ptr<unsigned[]> Opts;
};
inline hash_code hash_value(const AllowedRegVector &OptRegs) {
unsigned *OStart = OptRegs.Opts.get();
unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
return hash_combine(OptRegs.NumOpts,
hash_combine_range(OStart, OEnd));
}
/// \brief Holds graph-level metadata relevant to PBQP RA problems.
class GraphMetadata {
private:
typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
public:
typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;
GraphMetadata(MachineFunction &MF,
LiveIntervals &LIS,
MachineBlockFrequencyInfo &MBFI)
: MF(MF), LIS(LIS), MBFI(MBFI) {}
MachineFunction &MF;
LiveIntervals &LIS;
MachineBlockFrequencyInfo &MBFI;
void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
VRegToNodeId[VReg] = NId;
}
GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
auto VRegItr = VRegToNodeId.find(VReg);
if (VRegItr == VRegToNodeId.end())
return GraphBase::invalidNodeId();
return VRegItr->second;
}
void eraseNodeIdForVReg(unsigned VReg) {
VRegToNodeId.erase(VReg);
}
AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
return AllowedRegVecs.getValue(std::move(Allowed));
}
private:
DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
AllowedRegVecPool AllowedRegVecs;
};
/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
typedef RegAlloc::AllowedRegVector AllowedRegVector;
// The node's reduction state. The order in this enum is important,
// as it is assumed nodes can only progress up (i.e. towards being
// optimally reducible) when reducing the graph.
typedef enum {
Unprocessed,
NotProvablyAllocatable,
ConservativelyAllocatable,
OptimallyReducible
} ReductionState;
NodeMetadata()
: RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
VReg(0)
#ifndef NDEBUG
, everConservativelyAllocatable(false)
#endif
{}
// FIXME: Re-implementing default behavior to work around MSVC. Remove once
// MSVC synthesizes move constructors properly.
NodeMetadata(const NodeMetadata &Other)
: RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
AllowedRegs(Other.AllowedRegs)
#ifndef NDEBUG
, everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
{
if (NumOpts > 0) {
std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
&OptUnsafeEdges[0]);
}
}
// FIXME: Re-implementing default behavior to work around MSVC. Remove once
// MSVC synthesizes move constructors properly.
NodeMetadata(NodeMetadata &&Other)
: RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
AllowedRegs(std::move(Other.AllowedRegs))
#ifndef NDEBUG
, everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
{}
// FIXME: Re-implementing default behavior to work around MSVC. Remove once
// MSVC synthesizes move constructors properly.
NodeMetadata& operator=(const NodeMetadata &Other) {
RS = Other.RS;
NumOpts = Other.NumOpts;
DeniedOpts = Other.DeniedOpts;
OptUnsafeEdges.reset(new unsigned[NumOpts]);
std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
OptUnsafeEdges.get());
VReg = Other.VReg;
AllowedRegs = Other.AllowedRegs;
#ifndef NDEBUG
everConservativelyAllocatable = Other.everConservativelyAllocatable;
#endif
return *this;
}
// FIXME: Re-implementing default behavior to work around MSVC. Remove once
// MSVC synthesizes move constructors properly.
NodeMetadata& operator=(NodeMetadata &&Other) {
RS = Other.RS;
NumOpts = Other.NumOpts;
DeniedOpts = Other.DeniedOpts;
OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
VReg = Other.VReg;
AllowedRegs = std::move(Other.AllowedRegs);
#ifndef NDEBUG
everConservativelyAllocatable = Other.everConservativelyAllocatable;
#endif
return *this;
}
void setVReg(unsigned VReg) { this->VReg = VReg; }
unsigned getVReg() const { return VReg; }
void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
this->AllowedRegs = std::move(AllowedRegs);
}
const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
void setup(const Vector& Costs) {
NumOpts = Costs.getLength() - 1;
OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
}
ReductionState getReductionState() const { return RS; }
void setReductionState(ReductionState RS) {
assert(RS >= this->RS && "A node's reduction state can not be downgraded");
this->RS = RS;
#ifndef NDEBUG
// Remember this state to assert later that a non-infinite register
// option was available.
if (RS == ConservativelyAllocatable)
everConservativelyAllocatable = true;
#endif
}
void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
const bool* UnsafeOpts =
Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
for (unsigned i = 0; i < NumOpts; ++i)
OptUnsafeEdges[i] += UnsafeOpts[i];
}
void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
const bool* UnsafeOpts =
Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
for (unsigned i = 0; i < NumOpts; ++i)
OptUnsafeEdges[i] -= UnsafeOpts[i];
}
bool isConservativelyAllocatable() const {
return (DeniedOpts < NumOpts) ||
(std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
&OptUnsafeEdges[NumOpts]);
}
#ifndef NDEBUG
bool wasConservativelyAllocatable() const {
return everConservativelyAllocatable;
}
#endif
private:
ReductionState RS;
unsigned NumOpts;
unsigned DeniedOpts;
std::unique_ptr<unsigned[]> OptUnsafeEdges;
unsigned VReg;
GraphMetadata::AllowedRegVecRef AllowedRegs;
#ifndef NDEBUG
bool everConservativelyAllocatable;
#endif
};
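/// Illustrative lifecycle (a minimal sketch; the solver below is the real
/// driver): metadata is set up from the node's cost vector, then updated
/// incrementally as edges come and go. `G`, `NId` and `EId` are assumed.
/// \code
///   NodeMetadata &NMd = G.getNodeMetadata(NId);
///   NMd.setup(G.getNodeCosts(NId)); // one option per allowed register
///   NMd.handleAddEdge(G.getEdgeCosts(EId).getMetadata(), /*Transpose=*/false);
///   if (NMd.isConservativelyAllocatable())
///     ; // this node is guaranteed a register and will never spill
/// \endcode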
class RegAllocSolverImpl {
private:
typedef MDMatrix<MatrixMetadata> RAMatrix;
public:
typedef PBQP::Vector RawVector;
typedef PBQP::Matrix RawMatrix;
typedef PBQP::Vector Vector;
typedef RAMatrix Matrix;
typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;
typedef GraphBase::NodeId NodeId;
typedef GraphBase::EdgeId EdgeId;
typedef RegAlloc::NodeMetadata NodeMetadata;
struct EdgeMetadata { };
typedef RegAlloc::GraphMetadata GraphMetadata;
typedef PBQP::Graph<RegAllocSolverImpl> Graph;
RegAllocSolverImpl(Graph &G) : G(G) {}
Solution solve() {
G.setSolver(*this);
Solution S;
setup();
S = backpropagate(G, reduce());
G.unsetSolver();
return S;
}
void handleAddNode(NodeId NId) {
assert(G.getNodeCosts(NId).getLength() > 1 &&
"PBQP Graph should not contain single or zero-option nodes");
G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
}
void handleRemoveNode(NodeId NId) {}
void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
void handleAddEdge(EdgeId EId) {
handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
}
void handleRemoveEdge(EdgeId EId) {
handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
}
void handleDisconnectEdge(EdgeId EId, NodeId NId) {
NodeMetadata& NMd = G.getNodeMetadata(NId);
const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
promote(NId, NMd);
}
void handleReconnectEdge(EdgeId EId, NodeId NId) {
NodeMetadata& NMd = G.getNodeMetadata(NId);
const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
}
void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
NodeId N1Id = G.getEdgeNode1Id(EId);
NodeId N2Id = G.getEdgeNode2Id(EId);
NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
bool Transpose = N1Id != G.getEdgeNode1Id(EId);
// Metadata are computed incrementally. First, update them
// by removing the old cost.
const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
N1Md.handleRemoveEdge(OldMMd, Transpose);
N2Md.handleRemoveEdge(OldMMd, !Transpose);
// Then update the metadata with the new cost.
const MatrixMetadata& MMd = NewCosts.getMetadata();
N1Md.handleAddEdge(MMd, Transpose);
N2Md.handleAddEdge(MMd, !Transpose);
// As the metadata may have changed with the update, the nodes may have
// become ConservativelyAllocatable or OptimallyReducible.
promote(N1Id, N1Md);
promote(N2Id, N2Md);
}
private:
void promote(NodeId NId, NodeMetadata& NMd) {
if (G.getNodeDegree(NId) == 3) {
// This node is becoming optimally reducible.
moveToOptimallyReducibleNodes(NId);
} else if (NMd.getReductionState() ==
NodeMetadata::NotProvablyAllocatable &&
NMd.isConservativelyAllocatable()) {
// This node just became conservatively allocatable.
moveToConservativelyAllocatableNodes(NId);
}
}
void removeFromCurrentSet(NodeId NId) {
switch (G.getNodeMetadata(NId).getReductionState()) {
case NodeMetadata::Unprocessed: break;
case NodeMetadata::OptimallyReducible:
assert(OptimallyReducibleNodes.find(NId) !=
OptimallyReducibleNodes.end() &&
"Node not in optimally reducible set.");
OptimallyReducibleNodes.erase(NId);
break;
case NodeMetadata::ConservativelyAllocatable:
assert(ConservativelyAllocatableNodes.find(NId) !=
ConservativelyAllocatableNodes.end() &&
"Node not in conservatively allocatable set.");
ConservativelyAllocatableNodes.erase(NId);
break;
case NodeMetadata::NotProvablyAllocatable:
assert(NotProvablyAllocatableNodes.find(NId) !=
NotProvablyAllocatableNodes.end() &&
"Node not in not-provably-allocatable set.");
NotProvablyAllocatableNodes.erase(NId);
break;
}
}
void moveToOptimallyReducibleNodes(NodeId NId) {
removeFromCurrentSet(NId);
OptimallyReducibleNodes.insert(NId);
G.getNodeMetadata(NId).setReductionState(
NodeMetadata::OptimallyReducible);
}
void moveToConservativelyAllocatableNodes(NodeId NId) {
removeFromCurrentSet(NId);
ConservativelyAllocatableNodes.insert(NId);
G.getNodeMetadata(NId).setReductionState(
NodeMetadata::ConservativelyAllocatable);
}
void moveToNotProvablyAllocatableNodes(NodeId NId) {
removeFromCurrentSet(NId);
NotProvablyAllocatableNodes.insert(NId);
G.getNodeMetadata(NId).setReductionState(
NodeMetadata::NotProvablyAllocatable);
}
void setup() {
// Set up worklists.
for (auto NId : G.nodeIds()) {
if (G.getNodeDegree(NId) < 3)
moveToOptimallyReducibleNodes(NId);
else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
moveToConservativelyAllocatableNodes(NId);
else
moveToNotProvablyAllocatableNodes(NId);
}
}
// Compute a reduction order for the graph by iteratively applying PBQP
// reduction rules. Locally optimal rules are applied whenever possible (R0,
// R1, R2). If no locally-optimal rules apply then any conservatively
// allocatable node is reduced. Finally, if no conservatively allocatable
// node exists then the node with the lowest spill-cost:degree ratio is
// selected.
std::vector<GraphBase::NodeId> reduce() {
assert(!G.empty() && "Cannot reduce empty graph.");
typedef GraphBase::NodeId NodeId;
std::vector<NodeId> NodeStack;
// Consume worklists.
while (true) {
if (!OptimallyReducibleNodes.empty()) {
NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
NodeId NId = *NItr;
OptimallyReducibleNodes.erase(NItr);
NodeStack.push_back(NId);
switch (G.getNodeDegree(NId)) {
case 0:
break;
case 1:
applyR1(G, NId);
break;
case 2:
applyR2(G, NId);
break;
default: llvm_unreachable("Not an optimally reducible node.");
}
} else if (!ConservativelyAllocatableNodes.empty()) {
// Conservatively allocatable nodes will never spill. For now just
// take the first node in the set and push it on the stack. When we
// start optimizing more heavily for register preferences, it may be
// better to push nodes with lower 'expected' or worst-case
// register costs first (since early nodes are the most
// constrained).
NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
NodeId NId = *NItr;
ConservativelyAllocatableNodes.erase(NItr);
NodeStack.push_back(NId);
G.disconnectAllNeighborsFromNode(NId);
} else if (!NotProvablyAllocatableNodes.empty()) {
NodeSet::iterator NItr =
std::min_element(NotProvablyAllocatableNodes.begin(),
NotProvablyAllocatableNodes.end(),
SpillCostComparator(G));
NodeId NId = *NItr;
NotProvablyAllocatableNodes.erase(NItr);
NodeStack.push_back(NId);
G.disconnectAllNeighborsFromNode(NId);
} else
break;
}
return NodeStack;
}
class SpillCostComparator {
public:
SpillCostComparator(const Graph& G) : G(G) {}
bool operator()(NodeId N1Id, NodeId N2Id) {
PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
if (N1SC == N2SC)
return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
return N1SC < N2SC;
}
private:
const Graph& G;
};
Graph& G;
typedef std::set<NodeId> NodeSet;
NodeSet OptimallyReducibleNodes;
NodeSet ConservativelyAllocatableNodes;
NodeSet NotProvablyAllocatableNodes;
};
class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
public:
PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
/// @brief Dump this graph to dbgs().
void dump() const;
/// @brief Dump this graph to an output stream.
/// @param OS Output stream to print on.
void dump(raw_ostream &OS) const;
/// @brief Print a representation of this graph in DOT format.
/// @param OS Output stream to print on.
void printDot(raw_ostream &OS) const;
};
inline Solution solve(PBQPRAGraph& G) {
if (G.empty())
return Solution();
RegAllocSolverImpl RegAllocSolver(G);
return RegAllocSolver.solve();
}
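/// Illustrative driver (a minimal sketch, assuming a graph populated by a
/// builder such as the PBQP allocator pass; `MF`, `LIS` and `MBFI` come
/// from the pass's analyses):
/// \code
///   PBQPRAGraph G(GraphMetadata(MF, LIS, MBFI));
///   // ... add one node per vreg and one edge per interference ...
///   Solution S = solve(G); // an empty graph yields an empty solution
/// \endcode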
} // namespace RegAlloc
} // namespace PBQP
/// @brief Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);
} // namespace llvm
#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LinkAllCodegenComponents.h | //===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all codegen related passes for tools like lli and
// llc that need this functionality.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
#include "llvm/CodeGen/GCs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include <cstdlib>
namespace {
struct ForceCodegenLinking {
ForceCodegenLinking() {
// We must reference the passes in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.
if (std::getenv("bar") != (char*) -1)
return;
(void) llvm::createFastRegisterAllocator();
(void) llvm::createBasicRegisterAllocator();
(void) llvm::createGreedyRegisterAllocator();
(void) llvm::createDefaultPBQPRegisterAllocator();
llvm::linkCoreCLRGC();
llvm::linkOcamlGC();
llvm::linkErlangGC();
llvm::linkShadowStackGC();
llvm::linkStatepointExampleGC();
(void) llvm::createBURRListDAGScheduler(nullptr,
llvm::CodeGenOpt::Default);
(void) llvm::createSourceListDAGScheduler(nullptr,
llvm::CodeGenOpt::Default);
(void) llvm::createHybridListDAGScheduler(nullptr,
llvm::CodeGenOpt::Default);
(void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
(void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
(void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
}
} ForceCodegenLinking; // Force link by creating a global definition.
}
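// Illustrative use (a sketch, not a requirement of this header): a codegen
// tool includes this header once in some translation unit so that the
// references above keep the components linked in.
//
//   #include "llvm/CodeGen/LinkAllCodegenComponents.h"
//   int main(int argc, char **argv) { /* normal tool setup */ }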
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | //==-- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
class MachineModuleInfo;
class Mangler;
class MCAsmInfo;
class MCExpr;
class MCSection;
class MCSectionMachO;
class MCSymbol;
class MCContext;
class GlobalValue;
class TargetMachine;
class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
bool UseInitArray;
mutable unsigned NextUniqueID = 0;
public:
TargetLoweringObjectFileELF() : UseInitArray(false) {}
~TargetLoweringObjectFileELF() override {}
void emitPersonalityValue(MCStreamer &Streamer, const TargetMachine &TM,
const MCSymbol *Sym) const override;
/// Given a constant with the SectionKind, return a section that it should be
/// placed in.
MCSection *getSectionForConstant(SectionKind Kind,
const Constant *C) const override;
MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
const TargetMachine &TM) const override;
bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
const Function &F) const override;
/// Return an MCExpr to use for a reference to the specified type info global
/// variable from exception handling information.
const MCExpr *
getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
Mangler &Mang, const TargetMachine &TM,
MachineModuleInfo *MMI,
MCStreamer &Streamer) const override;
// The symbol that gets passed to .cfi_personality.
MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const override;
void InitializeELF(bool UseInitArray_);
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
MCSection *getStaticDtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
};
class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
public:
~TargetLoweringObjectFileMachO() override {}
TargetLoweringObjectFileMachO();
/// Emit the module flags that specify the garbage collection information.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const override;
MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *getSectionForConstant(SectionKind Kind,
const Constant *C) const override;
/// The mach-o version of this method defaults to returning a stub reference.
const MCExpr *
getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
Mangler &Mang, const TargetMachine &TM,
MachineModuleInfo *MMI,
MCStreamer &Streamer) const override;
// The symbol that gets passed to .cfi_personality.
MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const override;
/// Get MachO PC relative GOT entry relocation
const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
const MCValue &MV, int64_t Offset,
MachineModuleInfo *MMI,
MCStreamer &Streamer) const override;
};
class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
public:
~TargetLoweringObjectFileCOFF() override {}
MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang,
const TargetMachine &TM) const override;
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool CannotUsePrivateLabel, Mangler &Mang,
const TargetMachine &TM) const override;
MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
const TargetMachine &TM) const override;
/// Emit Obj-C garbage collection and linker options. Only linker option
/// emission is implemented for COFF.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const override;
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
MCSection *getStaticDtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
const Mangler &Mang) const override;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineBasicBlock.h | //===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect the sequence of machine instructions for a basic block.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/DataTypes.h"
#include <functional>
namespace llvm {
class Pass;
class BasicBlock;
class MachineFunction;
class MCSymbol;
class SlotIndexes;
class StringRef;
class raw_ostream;
class MachineBranchProbabilityInfo;
template <>
struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> {
private:
mutable ilist_half_node<MachineInstr> Sentinel;
// this is only set by the MachineBasicBlock owning the LiveList
friend class MachineBasicBlock;
MachineBasicBlock* Parent;
public:
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
MachineInstr *
createSentinel() const {
return static_cast<MachineInstr*>(&Sentinel);
}
void destroySentinel(MachineInstr *) const {}
MachineInstr *provideInitialHead() const { return createSentinel(); }
MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); }
static void noteHead(MachineInstr*, MachineInstr*) {}
void addNodeToList(MachineInstr* N);
void removeNodeFromList(MachineInstr* N);
void transferNodesFromList(ilist_traits &SrcTraits,
ilist_iterator<MachineInstr> first,
ilist_iterator<MachineInstr> last);
void deleteNode(MachineInstr *N);
private:
void createNode(const MachineInstr &);
};
class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
typedef ilist<MachineInstr> Instructions;
Instructions Insts;
const BasicBlock *BB;
int Number;
MachineFunction *xParent;
/// Predecessors/Successors - Keep track of the predecessor / successor
/// basicblocks.
std::vector<MachineBasicBlock *> Predecessors;
std::vector<MachineBasicBlock *> Successors;
/// Weights - Keep track of the weights to the successors. This vector
/// has the same order as Successors, or it is empty if we don't use it
/// (i.e., the optimization is disabled).
std::vector<uint32_t> Weights;
typedef std::vector<uint32_t>::iterator weight_iterator;
typedef std::vector<uint32_t>::const_iterator const_weight_iterator;
/// LiveIns - Keep track of the physical registers that are livein of
/// the basicblock.
std::vector<unsigned> LiveIns;
/// Alignment - Alignment of the basic block. Zero if the basic block does
/// not need to be aligned.
/// The alignment is specified as log2(bytes).
unsigned Alignment;
/// IsLandingPad - Indicate that this basic block is entered via an
/// exception handler.
bool IsLandingPad;
/// AddressTaken - Indicate that this basic block is potentially the
/// target of an indirect branch.
bool AddressTaken;
/// \brief since getSymbol is a relatively heavy-weight operation, the symbol
/// is only computed once and is cached.
mutable MCSymbol *CachedMCSymbol;
// Intrusive list support
MachineBasicBlock() {}
explicit MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb);
~MachineBasicBlock();
// MachineBasicBlocks are allocated and owned by MachineFunction.
friend class MachineFunction;
public:
/// getBasicBlock - Return the LLVM basic block that this instance
/// corresponded to originally. Note that this may be NULL if this instance
/// does not correspond directly to an LLVM basic block.
///
const BasicBlock *getBasicBlock() const { return BB; }
/// getName - Return the name of the corresponding LLVM basic block, or
/// "(null)".
StringRef getName() const;
/// getFullName - Return a formatted string to identify this block and its
/// parent function.
std::string getFullName() const;
/// hasAddressTaken - Test whether this block is potentially the target
/// of an indirect branch.
bool hasAddressTaken() const { return AddressTaken; }
/// setHasAddressTaken - Set this block to reflect that it potentially
/// is the target of an indirect branch.
void setHasAddressTaken() { AddressTaken = true; }
/// getParent - Return the MachineFunction containing this basic block.
///
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
/// bundle_iterator - MachineBasicBlock iterator that automatically skips over
/// MIs that are inside bundles (i.e. walks top-level MIs only).
template<typename Ty, typename IterTy>
class bundle_iterator {
IterTy MII;
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = Ty;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
bundle_iterator(IterTy mii) : MII(mii) {}
bundle_iterator(Ty &mi) : MII(mi) {
assert(!mi.isBundledWithPred() &&
"It's not legal to initialize bundle_iterator with a bundled MI");
}
bundle_iterator(Ty *mi) : MII(mi) {
assert((!mi || !mi->isBundledWithPred()) &&
"It's not legal to initialize bundle_iterator with a bundled MI");
}
// Template allows conversion from const to nonconst.
template<class OtherTy, class OtherIterTy>
bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
: MII(I.getInstrIterator()) {}
bundle_iterator() : MII(nullptr) {}
Ty &operator*() const { return *MII; }
Ty *operator->() const { return &operator*(); }
operator Ty*() const { return MII; }
bool operator==(const bundle_iterator &x) const {
return MII == x.MII;
}
bool operator!=(const bundle_iterator &x) const {
return !operator==(x);
}
// Increment and decrement operators...
bundle_iterator &operator--() { // predecrement - Back up
do --MII;
while (MII->isBundledWithPred());
return *this;
}
bundle_iterator &operator++() { // preincrement - Advance
while (MII->isBundledWithSucc())
++MII;
++MII;
return *this;
}
bundle_iterator operator--(int) { // postdecrement operators...
bundle_iterator tmp = *this;
--*this;
return tmp;
}
bundle_iterator operator++(int) { // postincrement operators...
bundle_iterator tmp = *this;
++*this;
return tmp;
}
IterTy getInstrIterator() const {
return MII;
}
};
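/// Illustrative iteration (a minimal sketch): walking top-level instructions
/// versus every instruction, including those inside bundles. `MBB` is
/// assumed to be a populated block.
/// \code
///   for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
///        I != E; ++I)
///     ; // each bundle is visited once, via its head instruction
///   for (MachineBasicBlock::instr_iterator I = MBB.instr_begin(),
///        E = MBB.instr_end(); I != E; ++I)
///     ; // every MachineInstr, bundled or not
/// \endcode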
typedef Instructions::iterator instr_iterator;
typedef Instructions::const_iterator const_instr_iterator;
typedef std::reverse_iterator<instr_iterator> reverse_instr_iterator;
typedef
std::reverse_iterator<const_instr_iterator> const_reverse_instr_iterator;
typedef
bundle_iterator<MachineInstr,instr_iterator> iterator;
typedef
bundle_iterator<const MachineInstr,const_instr_iterator> const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
MachineInstr &instr_front() { return Insts.front(); }
MachineInstr &instr_back() { return Insts.back(); }
const MachineInstr &instr_front() const { return Insts.front(); }
const MachineInstr &instr_back() const { return Insts.back(); }
MachineInstr &front() { return Insts.front(); }
MachineInstr &back() { return *--end(); }
const MachineInstr &front() const { return Insts.front(); }
const MachineInstr &back() const { return *--end(); }
instr_iterator instr_begin() { return Insts.begin(); }
const_instr_iterator instr_begin() const { return Insts.begin(); }
instr_iterator instr_end() { return Insts.end(); }
const_instr_iterator instr_end() const { return Insts.end(); }
reverse_instr_iterator instr_rbegin() { return Insts.rbegin(); }
const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
iterator begin() { return instr_begin(); }
const_iterator begin() const { return instr_begin(); }
iterator end () { return instr_end(); }
const_iterator end () const { return instr_end(); }
reverse_iterator rbegin() { return instr_rbegin(); }
const_reverse_iterator rbegin() const { return instr_rbegin(); }
reverse_iterator rend () { return instr_rend(); }
const_reverse_iterator rend () const { return instr_rend(); }
inline iterator_range<iterator> terminators() {
return iterator_range<iterator>(getFirstTerminator(), end());
}
inline iterator_range<const_iterator> terminators() const {
return iterator_range<const_iterator>(getFirstTerminator(), end());
}
// Machine-CFG iterators
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
typedef std::vector<MachineBasicBlock *>::reverse_iterator
pred_reverse_iterator;
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
const_pred_reverse_iterator;
typedef std::vector<MachineBasicBlock *>::reverse_iterator
succ_reverse_iterator;
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
const_succ_reverse_iterator;
pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); }
const_pred_iterator pred_end() const { return Predecessors.end(); }
pred_reverse_iterator pred_rbegin()
{ return Predecessors.rbegin();}
const_pred_reverse_iterator pred_rbegin() const
{ return Predecessors.rbegin();}
pred_reverse_iterator pred_rend()
{ return Predecessors.rend(); }
const_pred_reverse_iterator pred_rend() const
{ return Predecessors.rend(); }
unsigned pred_size() const {
return (unsigned)Predecessors.size();
}
bool pred_empty() const { return Predecessors.empty(); }
succ_iterator succ_begin() { return Successors.begin(); }
const_succ_iterator succ_begin() const { return Successors.begin(); }
succ_iterator succ_end() { return Successors.end(); }
const_succ_iterator succ_end() const { return Successors.end(); }
succ_reverse_iterator succ_rbegin()
{ return Successors.rbegin(); }
const_succ_reverse_iterator succ_rbegin() const
{ return Successors.rbegin(); }
succ_reverse_iterator succ_rend()
{ return Successors.rend(); }
const_succ_reverse_iterator succ_rend() const
{ return Successors.rend(); }
unsigned succ_size() const {
return (unsigned)Successors.size();
}
bool succ_empty() const { return Successors.empty(); }
inline iterator_range<pred_iterator> predecessors() {
return iterator_range<pred_iterator>(pred_begin(), pred_end());
}
inline iterator_range<const_pred_iterator> predecessors() const {
return iterator_range<const_pred_iterator>(pred_begin(), pred_end());
}
inline iterator_range<succ_iterator> successors() {
return iterator_range<succ_iterator>(succ_begin(), succ_end());
}
inline iterator_range<const_succ_iterator> successors() const {
return iterator_range<const_succ_iterator>(succ_begin(), succ_end());
}
// LiveIn management methods.
/// Adds the specified register as a live in. Note that it is an error to add
/// the same register to the same set more than once unless the intention is
/// to call sortUniqueLiveIns after all registers are added.
void addLiveIn(unsigned Reg) { LiveIns.push_back(Reg); }
/// Sorts and uniques the LiveIns vector. It can be significantly faster to do
/// this than repeatedly calling isLiveIn before calling addLiveIn for every
/// LiveIn insertion.
void sortUniqueLiveIns() {
std::sort(LiveIns.begin(), LiveIns.end());
LiveIns.erase(std::unique(LiveIns.begin(), LiveIns.end()), LiveIns.end());
}
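/// Illustrative pattern (a sketch; `Regs` is assumed to have been collected
/// elsewhere): add live-ins in a batch, then sort and unique once, instead
/// of querying isLiveIn before every insertion.
/// \code
///   for (unsigned Reg : Regs)
///     MBB->addLiveIn(Reg);
///   MBB->sortUniqueLiveIns();
/// \endcode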
/// Add PhysReg as live in to this block, and ensure that there is a copy of
/// PhysReg to a virtual register of class RC. Return the virtual register
/// that is a copy of the live in PhysReg.
unsigned addLiveIn(unsigned PhysReg, const TargetRegisterClass *RC);
/// removeLiveIn - Remove the specified register from the live in set.
///
void removeLiveIn(unsigned Reg);
/// isLiveIn - Return true if the specified register is in the live in set.
///
bool isLiveIn(unsigned Reg) const;
// Iteration support for live in sets. These sets are kept in sorted
// order by their register number.
typedef std::vector<unsigned>::const_iterator livein_iterator;
livein_iterator livein_begin() const { return LiveIns.begin(); }
livein_iterator livein_end() const { return LiveIns.end(); }
bool livein_empty() const { return LiveIns.empty(); }
/// getAlignment - Return alignment of the basic block.
/// The alignment is specified as log2(bytes).
///
unsigned getAlignment() const { return Alignment; }
/// setAlignment - Set alignment of the basic block.
/// The alignment is specified as log2(bytes).
///
void setAlignment(unsigned Align) { Alignment = Align; }
/// isLandingPad - Returns true if the block is a landing pad. That is,
/// this basic block is entered via an exception handler.
bool isLandingPad() const { return IsLandingPad; }
/// setIsLandingPad - Indicates the block is a landing pad. That is,
/// this basic block is entered via an exception handler.
void setIsLandingPad(bool V = true) { IsLandingPad = V; }
/// getLandingPadSuccessor - If this block has a successor that is a landing
/// pad, return it. Otherwise return NULL.
const MachineBasicBlock *getLandingPadSuccessor() const;
// Code Layout methods.
/// moveBefore/moveAfter - move 'this' block before or after the specified
/// block. This only moves the block, it does not modify the CFG or adjust
/// potential fall-throughs at the end of the block.
void moveBefore(MachineBasicBlock *NewAfter);
void moveAfter(MachineBasicBlock *NewBefore);
/// updateTerminator - Update the terminator instructions in block to account
/// for changes to the layout. If the block previously used a fallthrough,
/// it may now need a branch, and if it previously used branching it may now
/// be able to use a fallthrough.
void updateTerminator();
// Machine-CFG mutators
/// addSuccessor - Add succ as a successor of this MachineBasicBlock.
/// The Predecessors list of succ is automatically updated. WEIGHT
/// parameter is stored in Weights list and it may be used by
/// MachineBranchProbabilityInfo analysis to calculate branch probability.
///
/// Note that duplicate Machine CFG edges are not allowed.
///
void addSuccessor(MachineBasicBlock *succ, uint32_t weight = 0);
/// Set successor weight of a given iterator.
void setSuccWeight(succ_iterator I, uint32_t weight);
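/// Illustrative usage (a minimal sketch; the weights are assumptions):
/// record CFG edges with branch weights that MachineBranchProbabilityInfo
/// can later turn into probabilities.
/// \code
///   MBB->addSuccessor(TakenMBB, /*weight=*/64);
///   MBB->addSuccessor(FallthroughMBB, /*weight=*/4);
/// \endcode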
/// removeSuccessor - Remove successor from the successors list of this
/// MachineBasicBlock. The Predecessors list of succ is automatically updated.
///
void removeSuccessor(MachineBasicBlock *succ);
/// removeSuccessor - Remove specified successor from the successors list of
/// this MachineBasicBlock. The Predecessors list of succ is automatically
/// updated. Return the iterator to the element after the one removed.
///
succ_iterator removeSuccessor(succ_iterator I);
/// replaceSuccessor - Replace successor OLD with NEW and update weight info.
///
void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
/// transferSuccessors - Transfers all the successors from MBB to this
/// machine basic block (i.e., copies all the successors from fromMBB and
/// removes all the successors from fromMBB).
void transferSuccessors(MachineBasicBlock *fromMBB);
/// transferSuccessorsAndUpdatePHIs - Transfers all the successors, as
/// in transferSuccessors, and update PHI operands in the successor blocks
/// which refer to fromMBB to refer to this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
/// isPredecessor - Return true if the specified MBB is a predecessor of this
/// block.
bool isPredecessor(const MachineBasicBlock *MBB) const;
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
bool isSuccessor(const MachineBasicBlock *MBB) const;
/// isLayoutSuccessor - Return true if the specified MBB will be emitted
/// immediately after this block, such that if this block exits by
/// falling through, control will transfer to the specified MBB. Note
/// that MBB need not be a successor at all, for example if this block
/// ends with an unconditional branch to some other block.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
/// canFallThrough - Return true if the block can implicitly transfer
/// control to the block after it by falling off the end of it. This should
/// return false if it can reach the block after it, but it uses an explicit
/// branch to do so (e.g., a table jump). True is a conservative answer.
bool canFallThrough();
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction. When adding instructions to the beginning of the
/// basic block, they should be added before the returned value, not before
/// the first instruction, which might be PHI.
/// Returns end() if there's no non-PHI instruction.
iterator getFirstNonPHI();
/// SkipPHIsAndLabels - Return the first instruction in MBB after I that is
/// not a PHI or a label. This is the correct point to insert copies at the
/// beginning of a basic block.
iterator SkipPHIsAndLabels(iterator I);
/// getFirstTerminator - returns an iterator to the first terminator
/// instruction of this basic block. If a terminator does not exist,
/// it returns end().
iterator getFirstTerminator();
const_iterator getFirstTerminator() const {
return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
}
/// getFirstInstrTerminator - Same getFirstTerminator but it ignores bundles
/// and return an instr_iterator instead.
instr_iterator getFirstInstrTerminator();
/// getFirstNonDebugInstr - returns an iterator to the first non-debug
/// instruction in the basic block, or end()
iterator getFirstNonDebugInstr();
const_iterator getFirstNonDebugInstr() const {
return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr();
}
/// getLastNonDebugInstr - returns an iterator to the last non-debug
/// instruction in the basic block, or end()
iterator getLastNonDebugInstr();
const_iterator getLastNonDebugInstr() const {
return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr();
}
/// SplitCriticalEdge - Split the critical edge from this block to the
/// given successor block, and return the newly created block, or null
/// if splitting is not possible.
///
/// This function updates LiveVariables, MachineDominatorTree, and
/// MachineLoopInfo, as applicable.
MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
/// Insert MI into the instruction list before I, possibly inside a bundle.
///
/// If the insertion point is inside a bundle, MI will be added to the bundle,
/// otherwise MI will not be added to any bundle. That means this function
/// alone can't be used to prepend or append instructions to bundles. See
/// MIBundleBuilder::insert() for a more reliable way of doing that.
instr_iterator insert(instr_iterator I, MachineInstr *M);
/// Insert a range of instructions into the instruction list before I.
template<typename IT>
void insert(iterator I, IT S, IT E) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
Insts.insert(I.getInstrIterator(), S, E);
}
/// Insert MI into the instruction list before I.
iterator insert(iterator I, MachineInstr *MI) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
"Cannot insert instruction with bundle flags");
return Insts.insert(I.getInstrIterator(), MI);
}
/// Insert MI into the instruction list after I.
iterator insertAfter(iterator I, MachineInstr *MI) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
"Cannot insert instruction with bundle flags");
return Insts.insertAfter(I.getInstrIterator(), MI);
}
/// Remove an instruction from the instruction list and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
instr_iterator erase(instr_iterator I);
/// Remove an instruction from the instruction list and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
instr_iterator erase_instr(MachineInstr *I) {
return erase(instr_iterator(I));
}
/// Remove a range of instructions from the instruction list and delete them.
iterator erase(iterator I, iterator E) {
return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
}
/// Remove an instruction or bundle from the instruction list and delete it.
///
/// If I points to a bundle of instructions, they are all erased.
iterator erase(iterator I) {
return erase(I, std::next(I));
}
/// Remove an instruction from the instruction list and delete it.
///
/// If I is the head of a bundle of instructions, the whole bundle will be
/// erased.
iterator erase(MachineInstr *I) {
return erase(iterator(I));
}
/// Remove the unbundled instruction from the instruction list without
/// deleting it.
///
/// This function can not be used to remove bundled instructions, use
/// remove_instr to remove individual instructions from a bundle.
MachineInstr *remove(MachineInstr *I) {
assert(!I->isBundled() && "Cannot remove bundled instructions");
return Insts.remove(I);
}
/// Remove the possibly bundled instruction from the instruction list
/// without deleting it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
MachineInstr *remove_instr(MachineInstr *I);
void clear() {
Insts.clear();
}
/// Take an instruction from MBB 'Other' at the position From, and insert it
/// into this MBB right before 'Where'.
///
/// If From points to a bundle of instructions, the whole bundle is moved.
void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
// The range splice() doesn't allow noop moves, but this one does.
if (Where != From)
splice(Where, Other, From, std::next(From));
}
/// Take a block of instructions from MBB 'Other' in the range [From, To),
/// and insert them into this MBB right before 'Where'.
///
/// The instruction at 'Where' must not be included in the range of
/// instructions to move.
void splice(iterator Where, MachineBasicBlock *Other,
iterator From, iterator To) {
Insts.splice(Where.getInstrIterator(), Other->Insts,
From.getInstrIterator(), To.getInstrIterator());
}
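/// Illustrative move (a minimal sketch; `MI` and `Other` are assumed): hoist
/// a single instruction from `Other` to just before this block's first
/// terminator.
/// \code
///   MBB->splice(MBB->getFirstTerminator(), Other,
///               MachineBasicBlock::iterator(MI));
/// \endcode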
/// removeFromParent - This method unlinks 'this' from the containing
/// function, and returns it, but does not delete it.
MachineBasicBlock *removeFromParent();
/// eraseFromParent - This method unlinks 'this' from the containing
/// function and deletes it.
void eraseFromParent();
/// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
/// 'Old', change the code and CFG so that it branches to 'New' instead.
void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
/// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in
/// the CFG to be inserted. If we have proven that MBB can only branch to
/// DestA and DestB, remove any other MBB successors from the CFG. DestA and
/// DestB can be null. Besides DestA and DestB, retain other edges leading
/// to LandingPads (currently there can be only one; we don't check or require
/// that here). Note it is possible that DestA and/or DestB are LandingPads.
bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
MachineBasicBlock *DestB,
bool isCond);
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
/// any DBG_VALUE instructions. Return UnknownLoc if there is none.
DebugLoc findDebugLoc(instr_iterator MBBI);
DebugLoc findDebugLoc(iterator MBBI) {
return findDebugLoc(MBBI.getInstrIterator());
}
/// Possible outcome of a register liveness query to computeRegisterLiveness()
enum LivenessQueryResult {
LQR_Live, ///< Register is known to be live.
LQR_OverlappingLive, ///< Register itself is not live, but some overlapping
///< register is.
LQR_Dead, ///< Register is known to be dead.
LQR_Unknown ///< Register liveness not decidable from local
///< neighborhood.
};
/// Return whether (physical) register \p Reg has been <def>ined and not
/// <kill>ed as of just before \p Before.
///
/// Search is localised to a neighborhood of \p Neighborhood instructions
/// before (searching for defs or kills) and \p Neighborhood instructions
/// after (searching just for defs) \p Before.
///
/// \p Reg must be a physical register.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
unsigned Reg,
const_iterator Before,
unsigned Neighborhood=10) const;
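/// Illustrative query (a sketch; `TRI`, `Reg` and `Before` are assumed):
/// \code
///   MachineBasicBlock::LivenessQueryResult LQR =
///       MBB->computeRegisterLiveness(TRI, Reg, Before);
///   if (LQR == MachineBasicBlock::LQR_Dead)
///     ; // Reg appears dead here, within the searched neighborhood
/// \endcode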
// Debugging methods.
void dump() const;
void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST,
SlotIndexes * = nullptr) const;
// Printing method used by LoopInfo.
void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
/// getNumber - MachineBasicBlocks are uniquely numbered at the function
/// level, unless they're not in a MachineFunction yet, in which case this
/// will return -1.
///
int getNumber() const { return Number; }
void setNumber(int N) { Number = N; }
/// getSymbol - Return the MCSymbol for this basic block.
///
MCSymbol *getSymbol() const;
private:
/// getWeightIterator - Return the weight iterator corresponding to the
/// successor iterator I.
weight_iterator getWeightIterator(succ_iterator I);
const_weight_iterator getWeightIterator(const_succ_iterator I) const;
friend class MachineBranchProbabilityInfo;
/// getSuccWeight - Return weight of the edge from this block to MBB. This
/// method should NOT be called directly, but by using getEdgeWeight method
/// from MachineBranchProbabilityInfo class.
uint32_t getSuccWeight(const_succ_iterator Succ) const;
// Methods used to maintain doubly linked list of blocks...
friend struct ilist_traits<MachineBasicBlock>;
// Machine-CFG mutators
/// addPredecessor - Add pred as a predecessor of this MachineBasicBlock.
/// Don't do this unless you know what you're doing, because it doesn't
/// update pred's successors list. Use pred->addSuccessor instead.
///
void addPredecessor(MachineBasicBlock *pred);
/// removePredecessor - Remove pred as a predecessor of this
/// MachineBasicBlock. Don't do this unless you know what you're
/// doing, because it doesn't update pred's successors list. Use
/// pred->removeSuccessor instead.
///
void removePredecessor(MachineBasicBlock *pred);
};
raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
// This is useful when building IndexedMaps keyed on basic block pointers.
struct MBB2NumberFunctor {
unsigned operator()(const MachineBasicBlock *MBB) const {
return MBB->getNumber();
}
};
//===--------------------------------------------------------------------===//
// GraphTraits specializations for machine basic block graphs (machine-CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks...
//
template <> struct GraphTraits<MachineBasicBlock *> {
typedef MachineBasicBlock NodeType;
typedef MachineBasicBlock::succ_iterator ChildIteratorType;
static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->succ_begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->succ_end();
}
};
template <> struct GraphTraits<const MachineBasicBlock *> {
typedef const MachineBasicBlock NodeType;
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->succ_begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->succ_end();
}
};
// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks... and to walk it
// in inverse order. Inverse order for a function is considered
// to be when traversing the predecessor edges of a MBB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
typedef MachineBasicBlock NodeType;
typedef MachineBasicBlock::pred_iterator ChildIteratorType;
static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) {
return G.Graph;
}
static inline ChildIteratorType child_begin(NodeType *N) {
return N->pred_begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->pred_end();
}
};
template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
typedef const MachineBasicBlock NodeType;
typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) {
return G.Graph;
}
static inline ChildIteratorType child_begin(NodeType *N) {
return N->pred_begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->pred_end();
}
};
/// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction
/// at some point after the MachineInstrSpan is constructed.
class MachineInstrSpan {
MachineBasicBlock &MBB;
MachineBasicBlock::iterator I, B, E;
public:
MachineInstrSpan(MachineBasicBlock::iterator I)
: MBB(*I->getParent()),
I(I),
B(I == MBB.begin() ? MBB.end() : std::prev(I)),
E(std::next(I)) {}
MachineBasicBlock::iterator begin() {
return B == MBB.end() ? MBB.begin() : std::next(B);
}
MachineBasicBlock::iterator end() { return E; }
bool empty() { return begin() == end(); }
MachineBasicBlock::iterator getInitial() { return I; }
};
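/// Illustrative usage (a minimal sketch; `MI` is an iterator into a block):
/// capture everything inserted around `MI` so the new instructions can be
/// post-processed afterwards.
/// \code
///   MachineInstrSpan MIS(MI);
///   // ... insert instructions before and/or after MI ...
///   for (MachineBasicBlock::iterator I = MIS.begin(), E = MIS.end();
///        I != E; ++I)
///     ; // visits MI and the newly inserted instructions
/// \endcode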
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/PBQPRAConstraint.h | //===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
#define LLVM_CODEGEN_PBQPRACONSTRAINT_H
#include <memory>
#include <vector>
namespace llvm {
namespace PBQP {
namespace RegAlloc {
// Forward declare PBQP graph class.
class PBQPRAGraph;
}
}
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineFunction;
class TargetRegisterInfo;
typedef PBQP::RegAlloc::PBQPRAGraph PBQPRAGraph;
/// @brief Abstract base for classes implementing PBQP register allocation
/// constraints (e.g. Spill-costs, interference, coalescing).
class PBQPRAConstraint {
public:
virtual ~PBQPRAConstraint() = 0;
virtual void apply(PBQPRAGraph &G) = 0;
private:
virtual void anchor();
};
/// @brief PBQP register allocation constraint composer.
///
/// Constraints added to this list will be applied, in the order that they are
/// added, to the PBQP graph.
class PBQPRAConstraintList : public PBQPRAConstraint {
public:
void apply(PBQPRAGraph &G) override {
for (auto &C : Constraints)
C->apply(G);
}
void addConstraint(std::unique_ptr<PBQPRAConstraint> C) {
if (C)
Constraints.push_back(std::move(C));
}
private:
std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;
void anchor() override;
};
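/// Illustrative composition (a sketch; `MyTargetConstraint` is a
/// hypothetical subclass and `G` an already-built graph): constraints are
/// applied in the order they were added.
/// \code
///   auto CL = llvm::make_unique<PBQPRAConstraintList>();
///   CL->addConstraint(llvm::make_unique<MyTargetConstraint>());
///   CL->apply(G);
/// \endcode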
}
#endif /* LLVM_CODEGEN_PBQPRACONSTRAINT_H */
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineModuleInfo.h | //===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect meta information for a module. This information should be in a
// neutral form that can be used by different debugging and exception handling
// schemes.
//
// The organization of information is primarily clustered around the source
// compile units. The main exception is source line correspondence where
// inlining may interleave code from various compile units.
//
// The following information can be retrieved from the MachineModuleInfo.
//
// -- Source directories - Directories are uniqued based on their canonical
// string and assigned a sequential numeric ID (base 1.)
// -- Source files - Files are also uniqued based on their name and directory
// ID. A file ID is sequential number (base 1.)
// -- Source line correspondence - A vector of file ID, line#, column# triples.
// A DEBUG_LOCATION instruction is generated by the DAG Legalizer
// corresponding to each entry in the source line list. This allows a debug
// emitter to generate labels referenced by debug information tables.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
#define LLVM_CODEGEN_MACHINEMODULEINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Dwarf.h"
namespace llvm {
//===----------------------------------------------------------------------===//
// Forward declarations.
class Constant;
class GlobalVariable;
class BlockAddress;
class MDNode;
class MMIAddrLabelMap;
class MachineBasicBlock;
class MachineFunction;
class Module;
class PointerType;
class StructType;
struct WinEHFuncInfo;
struct SEHHandler {
// Filter or finally function. Null indicates a catch-all.
const Function *FilterOrFinally;
// Address of block to recover at. Null for a finally handler.
const BlockAddress *RecoverBA;
};
//===----------------------------------------------------------------------===//
/// LandingPadInfo - This structure is used to retain landing pad info for
/// the current function.
///
struct LandingPadInfo {
MachineBasicBlock *LandingPadBlock; // Landing pad block.
SmallVector<MCSymbol *, 1> BeginLabels; // Labels prior to invoke.
SmallVector<MCSymbol *, 1> EndLabels; // Labels after invoke.
SmallVector<SEHHandler, 1> SEHHandlers; // SEH handlers active at this lpad.
MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
const Function *Personality; // Personality function.
std::vector<int> TypeIds; // List of type ids (filters negative).
int WinEHState; // WinEH specific state number.
explicit LandingPadInfo(MachineBasicBlock *MBB)
: LandingPadBlock(MBB), LandingPadLabel(nullptr), Personality(nullptr),
WinEHState(-1) {}
};
//===----------------------------------------------------------------------===//
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
/// this type are accessed/created with MMI::getInfo and destroyed when the
/// MachineModuleInfo is destroyed.
///
class MachineModuleInfoImpl {
public:
typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
virtual ~MachineModuleInfoImpl();
typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
protected:
/// Return the entries from a DenseMap in a deterministic sorted order.
/// Clears the map.
static SymbolListTy getSortedStubs(DenseMap<MCSymbol*, StubValueTy>&);
};
//===----------------------------------------------------------------------===//
/// MachineModuleInfo - This class contains meta information specific to a
/// module. Queries can be made by different debugging and exception handling
/// schemes and reformatted for specific use.
///
class MachineModuleInfo : public ImmutablePass {
/// Context - This is the MCContext used for the entire code generator.
MCContext Context;
/// TheModule - This is the LLVM Module being worked on.
const Module *TheModule;
/// ObjFileMMI - This is the object-file-format-specific implementation of
/// MachineModuleInfoImpl, which lets targets accumulate whatever info they
/// want.
MachineModuleInfoImpl *ObjFileMMI;
/// List of moves done by a function's prolog. Used to construct frame maps
/// by debug and exception handling consumers.
std::vector<MCCFIInstruction> FrameInstructions;
/// LandingPads - List of LandingPadInfo describing the landing pad
/// information in the current function.
std::vector<LandingPadInfo> LandingPads;
/// LPadToCallSiteMap - Map a landing pad's EH symbol to the call site
/// indexes.
DenseMap<MCSymbol*, SmallVector<unsigned, 4> > LPadToCallSiteMap;
/// CallSiteMap - Map of invoke call site index values to associated begin
/// EH_LABEL for the current function.
DenseMap<MCSymbol*, unsigned> CallSiteMap;
/// CurCallSite - The current call site index being processed, if any. 0 if
/// none.
unsigned CurCallSite;
/// TypeInfos - List of C++ TypeInfo used in the current function.
std::vector<const GlobalValue *> TypeInfos;
/// FilterIds - List of typeids encoding filters used in the current function.
std::vector<unsigned> FilterIds;
/// FilterEnds - List of the indices in FilterIds corresponding to filter
/// terminators.
std::vector<unsigned> FilterEnds;
/// Personalities - Vector of all personality functions ever seen. Used to
/// emit common EH frames.
std::vector<const Function *> Personalities;
/// AddrLabelSymbols - This map keeps track of which symbol is being used for
/// the specified basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
bool CallsEHReturn;
bool CallsUnwindInit;
/// DbgInfoAvailable - True if debugging information is available
/// in this module.
bool DbgInfoAvailable;
/// UsesVAFloatArgument - True if this module calls a VarArg function with
/// floating-point arguments. This is used to emit an undefined reference
/// to _fltused on Windows targets.
bool UsesVAFloatArgument;
/// UsesMorestackAddr - True if the module calls the __morestack function
/// indirectly, as is required under the large code model on x86. This is used
/// to emit a definition of a symbol, __morestack_addr, containing the
/// address. See comments in lib/Target/X86/X86FrameLowering.cpp for more
/// details.
bool UsesMorestackAddr;
EHPersonality PersonalityTypeCache;
DenseMap<const Function *, std::unique_ptr<WinEHFuncInfo>> FuncInfoMap;
public:
static char ID; // Pass identification, replacement for typeid
struct VariableDbgInfo {
const DILocalVariable *Var;
const DIExpression *Expr;
unsigned Slot;
const DILocation *Loc;
VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
unsigned Slot, const DILocation *Loc)
: Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
};
typedef SmallVector<VariableDbgInfo, 4> VariableDbgInfoMapTy;
VariableDbgInfoMapTy VariableDbgInfos;
MachineModuleInfo(); // DUMMY CONSTRUCTOR, DO NOT CALL.
// Real constructor.
MachineModuleInfo(const MCAsmInfo &MAI, const MCRegisterInfo &MRI,
const MCObjectFileInfo *MOFI);
~MachineModuleInfo() override;
// Initialization and Finalization
bool doInitialization(Module &) override;
bool doFinalization(Module &) override;
/// EndFunction - Discard function meta information.
///
void EndFunction();
const MCContext &getContext() const { return Context; }
MCContext &getContext() { return Context; }
void setModule(const Module *M) { TheModule = M; }
const Module *getModule() const { return TheModule; }
const Function *getWinEHParent(const Function *F) const;
WinEHFuncInfo &getWinEHFuncInfo(const Function *F);
bool hasWinEHFuncInfo(const Function *F) const {
return FuncInfoMap.count(getWinEHParent(F)) > 0;
}
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
///
template<typename Ty>
Ty &getObjFileInfo() {
if (ObjFileMMI == nullptr)
ObjFileMMI = new Ty(*this);
return *static_cast<Ty*>(ObjFileMMI);
}
template<typename Ty>
const Ty &getObjFileInfo() const {
return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
}
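// Illustrative use (not mandated by this interface): a Mach-O target fetches,
// lazily creating, its private per-module info via the subclass declared in
// MachineModuleInfoImpls.h; any MachineModuleInfoImpl subclass works the same
// way:
//
//   MachineModuleInfoMachO &MMIMacho =
//       MMI.getObjFileInfo<MachineModuleInfoMachO>();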
/// hasDebugInfo - Returns true if valid debug info is present.
///
bool hasDebugInfo() const { return DbgInfoAvailable; }
void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
bool callsEHReturn() const { return CallsEHReturn; }
void setCallsEHReturn(bool b) { CallsEHReturn = b; }
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
bool usesVAFloatArgument() const {
return UsesVAFloatArgument;
}
void setUsesVAFloatArgument(bool b) {
UsesVAFloatArgument = b;
}
bool usesMorestackAddr() const {
return UsesMorestackAddr;
}
void setUsesMorestackAddr(bool b) {
UsesMorestackAddr = b;
}
/// \brief Returns a reference to a list of cfi instructions in the current
/// function's prologue. Used to construct frame maps for debug and exception
/// handling consumers.
const std::vector<MCCFIInstruction> &getFrameInstructions() const {
return FrameInstructions;
}
unsigned LLVM_ATTRIBUTE_UNUSED_RESULT
addFrameInst(const MCCFIInstruction &Inst) {
FrameInstructions.push_back(Inst);
return FrameInstructions.size() - 1;
}
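// A sketch of typical use from a target's frame lowering (the particular CFI
// instruction and StackSize are illustrative):
//
//   unsigned CFIIndex = MMI.addFrameInst(
//       MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
//   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
//       .addCFIIndex(CFIIndex);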
/// getAddrLabelSymbol - Return the symbol to be used for the specified basic
/// block when its address is taken. This cannot be its normal LBB label
/// because the block may be accessed outside its containing function.
MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
return getAddrLabelSymbolToEmit(BB).front();
}
/// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
/// basic block when its address is taken. If other blocks were RAUW'd to
/// this one, we may have to emit them as well; return the whole set.
ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);
/// takeDeletedSymbolsForFunction - If the specified function has had any
/// references to address-taken blocks generated, but the block got deleted,
/// return the symbol now so we can emit it. This prevents emitting a
/// reference to a symbol that has no definition.
void takeDeletedSymbolsForFunction(const Function *F,
std::vector<MCSymbol*> &Result);
//===- EH ---------------------------------------------------------------===//
/// getOrCreateLandingPadInfo - Find or create a LandingPadInfo for the
/// specified MachineBasicBlock.
LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
/// addInvoke - Provide the begin and end labels of an invoke style call and
/// associate it with a try landing pad block.
void addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel);
/// addLandingPad - Add a new landing pad. Returns the label ID for the
/// landing pad entry.
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
/// addPersonality - Provide the personality function for the exception
/// information.
void addPersonality(MachineBasicBlock *LandingPad,
const Function *Personality);
void addPersonality(const Function *Personality);
void addWinEHState(MachineBasicBlock *LandingPad, int State);
/// getPersonalityIndex - Get index of the current personality function inside
/// the Personalities array.
unsigned getPersonalityIndex() const;
/// getPersonalities - Return array of personality functions ever seen.
const std::vector<const Function *>& getPersonalities() const {
return Personalities;
}
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void addCatchTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo);
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void addFilterTypeInfo(MachineBasicBlock *LandingPad,
ArrayRef<const GlobalValue *> TyInfo);
/// addCleanup - Add a cleanup action for a landing pad.
///
void addCleanup(MachineBasicBlock *LandingPad);
void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
const BlockAddress *RecoverLabel);
void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
const Function *Cleanup);
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned getTypeIDFor(const GlobalValue *TI);
/// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
/// function wide.
int getFilterIDFor(std::vector<unsigned> &TyIds);
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
/// getLandingPads - Return a reference to the landing pad info for the
/// current function.
const std::vector<LandingPadInfo> &getLandingPads() const {
return LandingPads;
}
/// setCallSiteLandingPad - Map the landing pad's EH symbol to the call
/// site indexes.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
/// getCallSiteLandingPad - Get the call site indexes for a landing pad EH
/// symbol.
SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
assert(hasCallSiteLandingPad(Sym) &&
"missing call site number for landing pad!");
return LPadToCallSiteMap[Sym];
}
/// hasCallSiteLandingPad - Return true if the landing pad EH symbol has an
/// associated call site.
bool hasCallSiteLandingPad(MCSymbol *Sym) {
return !LPadToCallSiteMap[Sym].empty();
}
/// setCallSiteBeginLabel - Map the begin label for a call site.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
CallSiteMap[BeginLabel] = Site;
}
/// getCallSiteBeginLabel - Get the call site number for a begin label.
unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) {
assert(hasCallSiteBeginLabel(BeginLabel) &&
"Missing call site number for EH_LABEL!");
return CallSiteMap[BeginLabel];
}
/// hasCallSiteBeginLabel - Return true if the begin label has a call site
/// number associated with it.
bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) {
return CallSiteMap[BeginLabel] != 0;
}
/// setCurrentCallSite - Set the call site currently being processed.
void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
/// getCurrentCallSite - Get the call site currently being processed, if any.
/// Returns zero if none.
unsigned getCurrentCallSite() { return CurCallSite; }
/// getTypeInfos - Return a reference to the C++ typeinfo for the current
/// function.
const std::vector<const GlobalValue *> &getTypeInfos() const {
return TypeInfos;
}
/// getFilterIds - Return a reference to the typeids encoding filters used in
/// the current function.
const std::vector<unsigned> &getFilterIds() const {
return FilterIds;
}
/// getPersonality - Return a personality function if available. The presence
/// of one is required to emit exception handling info.
const Function *getPersonality() const;
/// Classify the personality function amongst known EH styles.
EHPersonality getPersonalityType();
/// setVariableDbgInfo - Collect information used to emit debugging
/// information of a variable.
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
unsigned Slot, const DILocation *Loc) {
VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
}
VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
}; // End class MachineModuleInfo
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineInstrBuilder.h | //===-- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes a function named BuildMI, which is useful for dramatically
// simplifying how MachineInstrs are created. It allows use of code like this:
//
// M = BuildMI(X86::ADDrr8, 2).addReg(argVal1).addReg(argVal2);
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class MCInstrDesc;
class MDNode;
namespace RegState {
enum {
Define = 0x2,
Implicit = 0x4,
Kill = 0x8,
Dead = 0x10,
Undef = 0x20,
EarlyClobber = 0x40,
Debug = 0x80,
InternalRead = 0x100,
DefineNoRead = Define | Undef,
ImplicitDefine = Implicit | Define,
ImplicitKill = Implicit | Kill
};
}
class MachineInstrBuilder {
MachineFunction *MF;
MachineInstr *MI;
public:
MachineInstrBuilder() : MF(nullptr), MI(nullptr) {}
/// Create a MachineInstrBuilder for manipulating an existing instruction.
/// F must be the machine function that was used to allocate I.
MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
/// Allow automatic conversion to the machine instruction we are working on.
///
operator MachineInstr*() const { return MI; }
MachineInstr *operator->() const { return MI; }
operator MachineBasicBlock::iterator() const { return MI; }
/// If conversion operators fail, use this method to get the MachineInstr
/// explicitly.
MachineInstr *getInstr() const { return MI; }
/// addReg - Add a new virtual register operand...
///
const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
unsigned SubReg = 0) const {
assert((flags & 0x1) == 0 &&
"Passing in 'true' to addReg is forbidden! Use enums instead.");
MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
flags & RegState::Define,
flags & RegState::Implicit,
flags & RegState::Kill,
flags & RegState::Dead,
flags & RegState::Undef,
flags & RegState::EarlyClobber,
SubReg,
flags & RegState::Debug,
flags & RegState::InternalRead));
return *this;
}
/// addImm - Add a new immediate operand.
///
const MachineInstrBuilder &addImm(int64_t Val) const {
MI->addOperand(*MF, MachineOperand::CreateImm(Val));
return *this;
}
const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
return *this;
}
const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
return *this;
}
const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
return *this;
}
const MachineInstrBuilder &addFrameIndex(int Idx) const {
MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
return *this;
}
const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
int Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
return *this;
}
const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
TargetFlags));
return *this;
}
const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
return *this;
}
const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
return *this;
}
const MachineInstrBuilder &addExternalSymbol(const char *FnName,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
return *this;
}
const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
return *this;
}
const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
return *this;
}
const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
MI->addMemOperand(*MF, MMO);
return *this;
}
const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
MachineInstr::mmo_iterator e) const {
MI->setMemRefs(b, e);
return *this;
}
const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
MI->addOperand(*MF, MO);
return *this;
}
const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
: true) &&
"first MDNode argument of a DBG_VALUE not a variable");
return *this;
}
const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
return *this;
}
const MachineInstrBuilder &addSym(MCSymbol *Sym,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
return *this;
}
const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
MI->setFlags(Flags);
return *this;
}
const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
MI->setFlag(Flag);
return *this;
}
/// Add a displacement from an existing MachineOperand with an added offset.
const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
unsigned char TargetFlags = 0) const {
switch (Disp.getType()) {
default:
llvm_unreachable("Unhandled operand type in addDisp()");
case MachineOperand::MO_Immediate:
return addImm(Disp.getImm() + off);
case MachineOperand::MO_GlobalAddress: {
// If caller specifies new TargetFlags then use it, otherwise the
// default behavior is to copy the target flags from the existing
// MachineOperand. This means if the caller wants to clear the
// target flags it needs to do so explicitly.
if (TargetFlags)
return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
TargetFlags);
return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
Disp.getTargetFlags());
}
}
}
/// Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder ©ImplicitOps(const MachineInstr *OtherMI) {
MI->copyImplicitOps(*MF, OtherMI);
return *this;
}
};
/// BuildMI - Builder interface. Specify how to create the initial instruction
/// itself.
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
const MCInstrDesc &MCID) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
}
/// BuildMI - This version of the builder sets up the first operand as a
/// destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
.addReg(DestReg, RegState::Define);
}
/// BuildMI - This version of the builder inserts the newly-built
/// instruction before the given position in the given MachineBasicBlock, and
/// sets up the first operand as a destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::instr_iterator I,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineInstr *I,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
if (I->isInsideBundle()) {
MachineBasicBlock::instr_iterator MII = I;
return BuildMI(BB, MII, DL, MCID, DestReg);
}
MachineBasicBlock::iterator MII = I;
return BuildMI(BB, MII, DL, MCID, DestReg);
}
/// BuildMI - This version of the builder inserts the newly-built
/// instruction before the given position in the given MachineBasicBlock, and
/// does NOT take a destination register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const MCInstrDesc &MCID) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::instr_iterator I,
DebugLoc DL,
const MCInstrDesc &MCID) {
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineInstr *I,
DebugLoc DL,
const MCInstrDesc &MCID) {
if (I->isInsideBundle()) {
MachineBasicBlock::instr_iterator MII = I;
return BuildMI(BB, MII, DL, MCID);
}
MachineBasicBlock::iterator MII = I;
return BuildMI(BB, MII, DL, MCID);
}
/// BuildMI - This version of the builder inserts the newly-built
/// instruction at the end of the given MachineBasicBlock, and does NOT take a
/// destination register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
const MCInstrDesc &MCID) {
return BuildMI(*BB, BB->end(), DL, MCID);
}
/// BuildMI - This version of the builder inserts the newly-built
/// instruction at the end of the given MachineBasicBlock, and sets up the first
/// operand as a destination virtual register.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
const MCInstrDesc &MCID,
unsigned DestReg) {
return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
}
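// For illustration, a target might append "DestReg = ADD32rr SrcA, SrcB" to a
// block like this (the opcode and register names are examples, not part of
// this interface):
//
//   BuildMI(&MBB, DL, TII.get(X86::ADD32rr), DestReg)
//       .addReg(SrcA)
//       .addReg(SrcB, RegState::Kill);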
/// BuildMI - This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect+offset
/// address. The convention is that a DBG_VALUE is indirect iff the
/// second operand is an immediate.
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
if (IsIndirect)
return BuildMI(MF, DL, MCID)
.addReg(Reg, RegState::Debug)
.addImm(Offset)
.addMetadata(Variable)
.addMetadata(Expr);
else {
assert(Offset == 0 && "A direct address cannot have an offset.");
return BuildMI(MF, DL, MCID)
.addReg(Reg, RegState::Debug)
.addReg(0U, RegState::Debug)
.addMetadata(Variable)
.addMetadata(Expr);
}
}
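// A minimal usage sketch for this overload, lowering a variable that lives in
// a register (Variable and Expr are the DILocalVariable/DIExpression metadata
// from the llvm.dbg.value intrinsic being lowered):
//
//   BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE),
//           /*IsIndirect=*/false, Reg, /*Offset=*/0, Variable, Expr);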
/// BuildMI - This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect+offset
/// address and inserts it at position I.
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I, DebugLoc DL,
const MCInstrDesc &MCID, bool IsIndirect,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
MachineFunction &MF = *BB.getParent();
MachineInstr *MI =
BuildMI(MF, DL, MCID, IsIndirect, Reg, Offset, Variable, Expr);
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI);
}
inline unsigned getDefRegState(bool B) {
return B ? RegState::Define : 0;
}
inline unsigned getImplRegState(bool B) {
return B ? RegState::Implicit : 0;
}
inline unsigned getKillRegState(bool B) {
return B ? RegState::Kill : 0;
}
inline unsigned getDeadRegState(bool B) {
return B ? RegState::Dead : 0;
}
inline unsigned getUndefRegState(bool B) {
return B ? RegState::Undef : 0;
}
inline unsigned getInternalReadRegState(bool B) {
return B ? RegState::InternalRead : 0;
}
inline unsigned getDebugRegState(bool B) {
return B ? RegState::Debug : 0;
}
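// These helpers turn a boolean into the corresponding RegState flag so flags
// can be composed inline when copying operand state, e.g.:
//
//   MIB.addReg(SrcReg, getKillRegState(IsKill) | getUndefRegState(IsUndef));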
/// Helper class for constructing bundles of MachineInstrs.
///
/// MIBundleBuilder can create a bundle from scratch by inserting new
/// MachineInstrs one at a time, or it can create a bundle from a sequence of
/// existing MachineInstrs in a basic block.
class MIBundleBuilder {
MachineBasicBlock &MBB;
MachineBasicBlock::instr_iterator Begin;
MachineBasicBlock::instr_iterator End;
public:
/// Create an MIBundleBuilder that inserts instructions into a new bundle in
/// BB above the bundle or instruction at Pos.
MIBundleBuilder(MachineBasicBlock &BB,
MachineBasicBlock::iterator Pos)
: MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
/// Create a bundle from the sequence of instructions between B and E.
MIBundleBuilder(MachineBasicBlock &BB,
MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E)
: MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
assert(B != E && "No instructions to bundle");
++B;
while (B != E) {
MachineInstr *MI = B;
++B;
MI->bundleWithPred();
}
}
/// Create an MIBundleBuilder representing an existing instruction or bundle
/// that has MI as its head.
explicit MIBundleBuilder(MachineInstr *MI)
: MBB(*MI->getParent()), Begin(MI), End(getBundleEnd(MI)) {}
/// Return a reference to the basic block containing this bundle.
MachineBasicBlock &getMBB() const { return MBB; }
/// Return true if no instructions have been inserted in this bundle yet.
/// Empty bundles aren't representable in a MachineBasicBlock.
bool empty() const { return Begin == End; }
/// Return an iterator to the first bundled instruction.
MachineBasicBlock::instr_iterator begin() const { return Begin; }
/// Return an iterator beyond the last bundled instruction.
MachineBasicBlock::instr_iterator end() const { return End; }
/// Insert MI into this bundle before I which must point to an instruction in
/// the bundle, or end().
MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
MachineInstr *MI) {
MBB.insert(I, MI);
if (I == Begin) {
if (!empty())
MI->bundleWithSucc();
Begin = MI;
return *this;
}
if (I == End) {
MI->bundleWithPred();
return *this;
}
// MI was inserted in the middle of the bundle, so its neighbors' flags are
// already fine. Update MI's bundle flags manually.
MI->setFlag(MachineInstr::BundledPred);
MI->setFlag(MachineInstr::BundledSucc);
return *this;
}
/// Insert MI into MBB by prepending it to the instructions in the bundle.
/// MI will become the first instruction in the bundle.
MIBundleBuilder &prepend(MachineInstr *MI) {
return insert(begin(), MI);
}
/// Insert MI into MBB by appending it to the instructions in the bundle.
/// MI will become the last instruction in the bundle.
MIBundleBuilder &append(MachineInstr *MI) {
return insert(end(), MI);
}
};
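// A short usage sketch: building a two-instruction bundle from scratch at
// InsertPos (both MIs are assumed to exist already). If a BUNDLE header
// instruction is needed, finalizeBundle() from MachineInstrBundle.h can be
// run afterwards:
//
//   MIBundleBuilder Bundle(MBB, InsertPos);
//   Bundle.append(FirstMI);  // becomes the bundle head
//   Bundle.append(SecondMI); // bundled with its predecessor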
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/WinEHFuncInfo.h | //===-- llvm/CodeGen/WinEHFuncInfo.h ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Data structures and associated state for Windows exception handling schemes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_WINEHFUNCINFO_H
#define LLVM_CODEGEN_WINEHFUNCINFO_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/DenseMap.h"
namespace llvm {
class BasicBlock;
class Constant;
class Function;
class GlobalVariable;
class InvokeInst;
class IntrinsicInst;
class LandingPadInst;
class MCSymbol;
class Value;
enum ActionType { Catch, Cleanup };
class ActionHandler {
public:
ActionHandler(BasicBlock *BB, ActionType Type)
: StartBB(BB), Type(Type), EHState(-1), HandlerBlockOrFunc(nullptr) {}
ActionType getType() const { return Type; }
BasicBlock *getStartBlock() const { return StartBB; }
bool hasBeenProcessed() { return HandlerBlockOrFunc != nullptr; }
void setHandlerBlockOrFunc(Constant *F) { HandlerBlockOrFunc = F; }
Constant *getHandlerBlockOrFunc() { return HandlerBlockOrFunc; }
void setEHState(int State) { EHState = State; }
int getEHState() const { return EHState; }
private:
BasicBlock *StartBB;
ActionType Type;
int EHState;
// Can be either a BlockAddress or a Function depending on the EH personality.
Constant *HandlerBlockOrFunc;
};
class CatchHandler : public ActionHandler {
public:
CatchHandler(BasicBlock *BB, Constant *Selector, BasicBlock *NextBB)
: ActionHandler(BB, ActionType::Catch), Selector(Selector),
NextBB(NextBB), ExceptionObjectVar(nullptr),
ExceptionObjectIndex(-1) {}
// Method for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const ActionHandler *H) {
return H->getType() == ActionType::Catch;
}
Constant *getSelector() const { return Selector; }
BasicBlock *getNextBB() const { return NextBB; }
const Value *getExceptionVar() { return ExceptionObjectVar; }
TinyPtrVector<BasicBlock *> &getReturnTargets() { return ReturnTargets; }
void setExceptionVar(const Value *Val) { ExceptionObjectVar = Val; }
void setExceptionVarIndex(int Index) { ExceptionObjectIndex = Index; }
int getExceptionVarIndex() const { return ExceptionObjectIndex; }
void setReturnTargets(TinyPtrVector<BasicBlock *> &Targets) {
ReturnTargets = Targets;
}
private:
Constant *Selector;
BasicBlock *NextBB;
// While catch handlers are being outlined, the ExceptionObjectVar field will
// be populated with the instruction in the parent frame that corresponds
// to the exception object (or nullptr if the catch does not use an
// exception object) and the ExceptionObjectIndex field will be -1.
// When the parseEHActions function is called to populate a vector of
// instances of this class, the ExceptionObjectVar field will be nullptr
// and the ExceptionObjectIndex will be the index of the exception object in
// the parent function's localescape block.
const Value *ExceptionObjectVar;
int ExceptionObjectIndex;
TinyPtrVector<BasicBlock *> ReturnTargets;
};
class CleanupHandler : public ActionHandler {
public:
CleanupHandler(BasicBlock *BB) : ActionHandler(BB, ActionType::Cleanup) {}
// Method for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const ActionHandler *H) {
return H->getType() == ActionType::Cleanup;
}
};
void parseEHActions(const IntrinsicInst *II,
SmallVectorImpl<std::unique_ptr<ActionHandler>> &Actions);
// The following structs represent the .xdata for functions using C++
// exceptions on Windows.
struct WinEHUnwindMapEntry {
int ToState;
Function *Cleanup;
};
struct WinEHHandlerType {
int Adjectives;
GlobalVariable *TypeDescriptor;
int CatchObjRecoverIdx;
Function *Handler;
};
struct WinEHTryBlockMapEntry {
int TryLow;
int TryHigh;
SmallVector<WinEHHandlerType, 1> HandlerArray;
};
struct WinEHFuncInfo {
DenseMap<const Function *, const LandingPadInst *> RootLPad;
DenseMap<const Function *, const InvokeInst *> LastInvoke;
DenseMap<const Function *, int> HandlerEnclosedState;
DenseMap<const Function *, bool> LastInvokeVisited;
DenseMap<const LandingPadInst *, int> LandingPadStateMap;
DenseMap<const Function *, int> CatchHandlerParentFrameObjIdx;
DenseMap<const Function *, int> CatchHandlerParentFrameObjOffset;
DenseMap<const Function *, int> CatchHandlerMaxState;
DenseMap<const Function *, int> HandlerBaseState;
SmallVector<WinEHUnwindMapEntry, 4> UnwindMap;
SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
SmallVector<std::pair<MCSymbol *, int>, 4> IPToStateList;
int UnwindHelpFrameIdx = INT_MAX;
int UnwindHelpFrameOffset = -1;
unsigned NumIPToStateFuncsVisited = 0;
/// localescape index of the 32-bit EH registration node. Set by
/// WinEHStatePass and used indirectly by SEH filter functions of the parent.
int EHRegNodeEscapeIndex = INT_MAX;
WinEHFuncInfo() {}
};
/// Analyze the IR in ParentFn and its handlers to build WinEHFuncInfo, which
/// describes the state numbers and tables used by __CxxFrameHandler3. This
/// analysis assumes that WinEHPrepare has already been run.
void calculateWinCXXEHStateNumbers(const Function *ParentFn,
WinEHFuncInfo &FuncInfo);
}
#endif // LLVM_CODEGEN_WINEHFUNCINFO_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LatencyPriorityQueue.h | //===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
#include "llvm/CodeGen/ScheduleDAG.h"
namespace llvm {
class LatencyPriorityQueue;
/// Sorting functions for the Available queue.
struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
LatencyPriorityQueue *PQ;
explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
bool operator()(const SUnit* left, const SUnit* right) const;
};
class LatencyPriorityQueue : public SchedulingPriorityQueue {
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
/// NumNodesSolelyBlocking - This vector contains, for every node in the
/// Queue, the number of nodes that the node is the sole unscheduled
/// predecessor for. This is used as a tie-breaker heuristic for better
/// mobility.
std::vector<unsigned> NumNodesSolelyBlocking;
/// Queue - The queue.
std::vector<SUnit*> Queue;
latency_sort Picker;
public:
LatencyPriorityQueue() : Picker(this) {
}
bool isBottomUp() const override { return false; }
void initNodes(std::vector<SUnit> &sunits) override {
SUnits = &sunits;
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
}
void addNode(const SUnit *SU) override {
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
}
void updateNode(const SUnit *SU) override {
}
void releaseState() override {
SUnits = nullptr;
}
unsigned getLatency(unsigned NodeNum) const {
assert(NodeNum < (*SUnits).size());
return (*SUnits)[NodeNum].getHeight();
}
unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
assert(NodeNum < NumNodesSolelyBlocking.size());
return NumNodesSolelyBlocking[NodeNum];
}
bool empty() const override { return Queue.empty(); }
void push(SUnit *U) override;
SUnit *pop() override;
void remove(SUnit *SU) override;
// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void scheduledNode(SUnit *Node) override;
private:
void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
SUnit *getSingleUnscheduledPred(SUnit *SU);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/ScheduleDFS.h | //===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Definition of an ILP metric for machine level instruction scheduling.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
#define LLVM_CODEGEN_SCHEDULEDFS_H
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/DataTypes.h"
#include <vector>
namespace llvm {
class raw_ostream;
class IntEqClasses;
class ScheduleDAGInstrs;
class SUnit;
/// \brief Represent the ILP of the subDAG rooted at a DAG node.
///
/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
/// valid for all nodes regardless of their subtree membership.
///
/// When computed using bottom-up DFS, this metric assumes that the DAG is a
/// forest of trees with roots at the bottom of the schedule branching upward.
struct ILPValue {
unsigned InstrCount;
/// Length may either correspond to depth or height, depending on direction,
/// and cycles or nodes depending on context.
unsigned Length;
ILPValue(unsigned count, unsigned length):
InstrCount(count), Length(length) {}
// Order by the ILP metric's value.
bool operator<(ILPValue RHS) const {
return (uint64_t)InstrCount * RHS.Length
< (uint64_t)Length * RHS.InstrCount;
}
bool operator>(ILPValue RHS) const {
return RHS < *this;
}
bool operator<=(ILPValue RHS) const {
return (uint64_t)InstrCount * RHS.Length
<= (uint64_t)Length * RHS.InstrCount;
}
bool operator>=(ILPValue RHS) const {
return RHS <= *this;
}
void print(raw_ostream &OS) const;
void dump() const;
};
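// Note: the operators above order the rational ILP value InstrCount/Length by
// cross-multiplication, e.g. 5/2 < 8/3 because 5*3 < 8*2; the uint64_t casts
// keep the 32-bit products from overflowing.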
/// \brief Compute the values of each DAG node for various metrics during DFS.
class SchedDFSResult {
friend class SchedDFSImpl;
static const unsigned InvalidSubtreeID = ~0u;
/// \brief Per-SUnit data computed during DFS for various metrics.
///
/// A node's SubtreeID is set to itself when it is visited to indicate that it
/// is the root of a subtree. Later it is set to its parent to indicate an
/// interior node. Finally, it is set to a representative subtree ID during
/// finalization.
struct NodeData {
unsigned InstrCount;
unsigned SubtreeID;
NodeData(): InstrCount(0), SubtreeID(InvalidSubtreeID) {}
};
/// \brief Per-Subtree data computed during DFS.
struct TreeData {
unsigned ParentTreeID;
unsigned SubInstrCount;
TreeData(): ParentTreeID(InvalidSubtreeID), SubInstrCount(0) {}
};
/// \brief Record a connection between subtrees and the connection level.
struct Connection {
unsigned TreeID;
unsigned Level;
Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
};
bool IsBottomUp;
unsigned SubtreeLimit;
/// DFS results for each SUnit in this DAG.
std::vector<NodeData> DFSNodeData;
// Store per-tree data indexed on tree ID.
SmallVector<TreeData, 16> DFSTreeData;
// For each subtree discovered during DFS, record its connections to other
// subtrees.
std::vector<SmallVector<Connection, 4> > SubtreeConnections;
/// Cache the current connection level of each subtree.
/// This mutable array is updated during scheduling.
std::vector<unsigned> SubtreeConnectLevels;
public:
SchedDFSResult(bool IsBU, unsigned lim)
: IsBottomUp(IsBU), SubtreeLimit(lim) {}
/// \brief Get the node cutoff before subtrees are considered significant.
unsigned getSubtreeLimit() const { return SubtreeLimit; }
/// \brief Return true if this DFSResult is uninitialized.
///
/// resize() initializes DFSResult, while compute() populates it.
bool empty() const { return DFSNodeData.empty(); }
/// \brief Clear the results.
void clear() {
DFSNodeData.clear();
DFSTreeData.clear();
SubtreeConnections.clear();
SubtreeConnectLevels.clear();
}
/// \brief Initialize the result data with the size of the DAG.
void resize(unsigned NumSUnits) {
DFSNodeData.resize(NumSUnits);
}
/// \brief Compute various metrics for the DAG with given roots.
void compute(ArrayRef<SUnit> SUnits);
/// \brief Get the number of instructions in the given subtree and its
/// children.
unsigned getNumInstrs(const SUnit *SU) const {
return DFSNodeData[SU->NodeNum].InstrCount;
}
/// \brief Get the number of instructions in the given subtree not including
/// children.
unsigned getNumSubInstrs(unsigned SubtreeID) const {
return DFSTreeData[SubtreeID].SubInstrCount;
}
/// \brief Get the ILP value for a DAG node.
///
/// A leaf node has an ILP of 1/1.
ILPValue getILP(const SUnit *SU) const {
return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
}
/// \brief The number of subtrees detected in this DAG.
unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
/// \brief Get the ID of the subtree the given DAG node belongs to.
///
/// For convenience, if DFSResults have not been computed yet, give everything
/// tree ID 0.
unsigned getSubtreeID(const SUnit *SU) const {
if (empty())
return 0;
assert(SU->NodeNum < DFSNodeData.size() && "New Node");
return DFSNodeData[SU->NodeNum].SubtreeID;
}
/// \brief Get the connection level of a subtree.
///
/// For bottom-up trees, the connection level is the latency depth (in cycles)
/// of the deepest connection to another subtree.
unsigned getSubtreeLevel(unsigned SubtreeID) const {
return SubtreeConnectLevels[SubtreeID];
}
/// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
/// initially scheduled.
void scheduleTree(unsigned SubtreeID);
};
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/GCMetadata.h | //===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
// used as a communication channel from the target code generator to the target
// garbage collectors. This interface allows code generators and garbage
// collectors to be developed independently.
//
// The GCFunctionInfo class logs the data necessary to build a type accurate
// stack map. The code generator outputs:
//
// - Safe points as specified by the GCStrategy's NeededSafePoints.
// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
//
// As a refinement, liveness analysis calculates the set of live roots at each
// safe point. Liveness analysis is not presently performed by the code
// generator, so all roots are assumed live.
//
// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
// they are compiled. This accretion is necessary for collectors which must emit
// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
// outlives the MachineFunction from which it is derived and must not refer to
// any code generator data structures.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GCMETADATA_H
#define LLVM_CODEGEN_GCMETADATA_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include <memory>
namespace llvm {
class AsmPrinter;
class Constant;
class MCSymbol;
/// GCPoint - Metadata for a collector-safe point in machine code.
///
struct GCPoint {
GC::PointKind Kind; ///< The kind of the safe point.
MCSymbol *Label; ///< A label.
DebugLoc Loc;
GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
: Kind(K), Label(L), Loc(DL) {}
};
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
int Num; ///< Usually a frame index.
int StackOffset; ///< Offset from the stack pointer.
const Constant *Metadata; ///< Metadata straight from the call
///< to llvm.gcroot.
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
/// Garbage collection metadata for a single function. Currently, this
/// information only applies to GCStrategies which use GCRoot.
class GCFunctionInfo {
public:
typedef std::vector<GCPoint>::iterator iterator;
typedef std::vector<GCRoot>::iterator roots_iterator;
typedef std::vector<GCRoot>::const_iterator live_iterator;
private:
const Function &F;
GCStrategy &S;
uint64_t FrameSize;
std::vector<GCRoot> Roots;
std::vector<GCPoint> SafePoints;
// FIXME: Liveness. A 2D BitVector, perhaps?
//
// BitVector Liveness;
//
// bool islive(int point, int root) =
// Liveness[point * SafePoints.size() + root]
//
// The bit vector is the more compact representation where >3.2% of roots
// are live per safe point (1.5% on 64-bit hosts).
public:
GCFunctionInfo(const Function &F, GCStrategy &S);
~GCFunctionInfo();
/// getFunction - Return the function to which this metadata applies.
///
const Function &getFunction() const { return F; }
/// getStrategy - Return the GC strategy for the function.
///
GCStrategy &getStrategy() { return S; }
/// addStackRoot - Registers a root that lives on the stack. Num is the
/// stack object ID for the alloca (if the code generator is
/// using MachineFrameInfo).
void addStackRoot(int Num, const Constant *Metadata) {
Roots.push_back(GCRoot(Num, Metadata));
}
/// removeStackRoot - Removes a root.
roots_iterator removeStackRoot(roots_iterator position) {
return Roots.erase(position);
}
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
/// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
void addSafePoint(GC::PointKind Kind, MCSymbol *Label, DebugLoc DL) {
SafePoints.emplace_back(Kind, Label, DL);
}
/// getFrameSize/setFrameSize - Records the function's frame size.
///
uint64_t getFrameSize() const { return FrameSize; }
void setFrameSize(uint64_t S) { FrameSize = S; }
/// begin/end - Iterators for safe points.
///
iterator begin() { return SafePoints.begin(); }
iterator end() { return SafePoints.end(); }
size_t size() const { return SafePoints.size(); }
/// roots_begin/roots_end - Iterators for all roots in the function.
///
roots_iterator roots_begin() { return Roots.begin(); }
roots_iterator roots_end() { return Roots.end(); }
size_t roots_size() const { return Roots.size(); }
/// live_begin/live_end - Iterators for live roots at a given safe point.
///
live_iterator live_begin(const iterator &p) { return roots_begin(); }
live_iterator live_end(const iterator &p) { return roots_end(); }
size_t live_size(const iterator &p) const { return roots_size(); }
};
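// Roughly how a GC-aware code generator records this metadata (a sketch;
// FrameIdx, Label, and DL come from the surrounding lowering and emission
// context, and GMI is the GCModuleInfo pass declared below):
//
//   GCFunctionInfo &FI = GMI.getFunctionInfo(F);
//   FI.addStackRoot(FrameIdx, /*Metadata=*/nullptr);
//   FI.addSafePoint(GC::PostCall, Label, DL);
//   FI.setFrameSize(MF.getFrameInfo()->getStackSize());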
/// An analysis pass which caches information about the entire Module.
/// Records both the function level information used by GCRoots and a
/// cache of the 'active' gc strategy objects for the current Module.
class GCModuleInfo : public ImmutablePass {
/// An owning list of all GCStrategies which have been created
SmallVector<std::unique_ptr<GCStrategy>, 1> GCStrategyList;
/// A helper map to speedup lookups into the above list
StringMap<GCStrategy*> GCStrategyMap;
public:
/// Lookup the GCStrategy object associated with the given gc name.
/// Objects are owned internally; No caller should attempt to delete the
/// returned objects.
GCStrategy *getGCStrategy(const StringRef Name);
/// List of per function info objects. In theory, Each of these
/// may be associated with a different GC.
typedef std::vector<std::unique_ptr<GCFunctionInfo>> FuncInfoVec;
FuncInfoVec::iterator funcinfo_begin() { return Functions.begin(); }
FuncInfoVec::iterator funcinfo_end() { return Functions.end(); }
private:
/// Owning list of all GCFunctionInfos associated with this Module
FuncInfoVec Functions;
/// Non-owning map to bypass linear search when finding the GCFunctionInfo
/// associated with a particular Function.
typedef DenseMap<const Function *, GCFunctionInfo *> finfo_map_type;
finfo_map_type FInfoMap;
public:
typedef SmallVector<std::unique_ptr<GCStrategy>,1>::const_iterator iterator;
static char ID;
GCModuleInfo();
/// clear - Resets the pass. Any pass, which uses GCModuleInfo, should
/// call it in doFinalization().
///
void clear();
/// begin/end - Iterators for used strategies.
///
iterator begin() const { return GCStrategyList.begin(); }
iterator end() const { return GCStrategyList.end(); }
/// get - Look up function metadata. This is currently assumed to
/// have the side effect of initializing the associated GCStrategy. That
/// will soon change.
GCFunctionInfo &getFunctionInfo(const Function &F);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineModuleInfoImpls.h | //===-- llvm/CodeGen/MachineModuleInfoImpls.h -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines object-file format specific implementations of
// MachineModuleInfoImpl.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
#include "llvm/CodeGen/MachineModuleInfo.h"
namespace llvm {
class MCSymbol;
/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
/// for MachO targets.
class MachineModuleInfoMachO : public MachineModuleInfoImpl {
/// FnStubs - Darwin '$stub' stubs. The key is something like "Lfoo$stub",
/// the value is something like "_foo".
DenseMap<MCSymbol*, StubValueTy> FnStubs;
/// GVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
/// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
/// is true if this GV is external.
DenseMap<MCSymbol*, StubValueTy> GVStubs;
/// HiddenGVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
/// "Lfoo$non_lazy_ptr", the value is something like "_foo". Unlike GVStubs
/// these are for things with hidden visibility. The extra bit is true if
/// this GV is external.
DenseMap<MCSymbol*, StubValueTy> HiddenGVStubs;
virtual void anchor(); // Out of line virtual method.
public:
MachineModuleInfoMachO(const MachineModuleInfo &) {}
StubValueTy &getFnStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return FnStubs[Sym];
}
StubValueTy &getGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return GVStubs[Sym];
}
StubValueTy &getHiddenGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return HiddenGVStubs[Sym];
}
/// Accessor methods to return the set of stubs in sorted order.
SymbolListTy GetFnStubList() {
return getSortedStubs(FnStubs);
}
SymbolListTy GetGVStubList() {
return getSortedStubs(GVStubs);
}
SymbolListTy GetHiddenGVStubList() {
return getSortedStubs(HiddenGVStubs);
}
};
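// Illustrative stub registration, roughly as a Mach-O AsmPrinter does it
// (StubSym and GVSym are example symbols; the bool bit records whether the
// referenced GV is external):
//
//   MachineModuleInfoMachO &MMIMacho =
//       MMI->getObjFileInfo<MachineModuleInfoMachO>();
//   MachineModuleInfoImpl::StubValueTy &Entry =
//       MMIMacho.getGVStubEntry(StubSym);
//   if (!Entry.getPointer())
//     Entry = MachineModuleInfoImpl::StubValueTy(GVSym, /*External=*/true);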
/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
/// for ELF targets.
class MachineModuleInfoELF : public MachineModuleInfoImpl {
/// GVStubs - These stubs are used to materialize global addresses in PIC
/// mode.
DenseMap<MCSymbol*, StubValueTy> GVStubs;
virtual void anchor(); // Out of line virtual method.
public:
MachineModuleInfoELF(const MachineModuleInfo &) {}
StubValueTy &getGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return GVStubs[Sym];
}
/// Accessor methods to return the set of stubs in sorted order.
SymbolListTy GetGVStubList() {
return getSortedStubs(GVStubs);
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineCombinerPattern.h | //===-- llvm/CodeGen/MachineCombinerPattern.h - Instruction pattern supported by
// combiner ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the instruction patterns supported by the machine combiner.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
namespace llvm {
/// Enumeration of the instruction patterns supported by the machine
/// combiner.
namespace MachineCombinerPattern {
// Forward declaration
enum MC_PATTERN : int;
} // end namespace MachineCombinerPattern
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineInstr.h | //===-- llvm/CodeGen/MachineInstr.h - MachineInstr class --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineInstr class, which is the
// basic representation for all target dependent machine instructions used by
// the back end.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEINSTR_H
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Target/TargetOpcodes.h"
namespace llvm {
template <typename T> class SmallVectorImpl;
class AliasAnalysis;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;
class MachineFunction;
class MachineMemOperand;
//===----------------------------------------------------------------------===//
/// Representation of each machine instruction.
///
/// This class isn't a POD type, but it must have a trivial destructor. When a
/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
/// without having their destructor called.
///
class MachineInstr : public ilist_node<MachineInstr> {
public:
typedef MachineMemOperand **mmo_iterator;
/// Flags to specify different kinds of comments to output in
/// assembly code. These flags carry semantic information not
/// otherwise easily derivable from the IR text.
///
enum CommentFlag {
ReloadReuse = 0x1
};
enum MIFlag {
NoFlags = 0,
FrameSetup = 1 << 0, // Instruction is used as a part of
// function frame setup code.
BundledPred = 1 << 1, // Instruction has bundled predecessors.
BundledSucc = 1 << 2 // Instruction has bundled successors.
};
private:
const MCInstrDesc *MCID; // Instruction descriptor.
MachineBasicBlock *Parent; // Pointer to the owning basic block.
// Operands are allocated by an ArrayRecycler.
MachineOperand *Operands; // Pointer to the first operand.
unsigned NumOperands; // Number of operands on instruction.
typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity;
OperandCapacity CapOperands; // Capacity of the Operands array.
uint8_t Flags; // Various bits of additional
// information about machine
// instruction.
uint8_t AsmPrinterFlags; // Various bits of information used by
// the AsmPrinter to emit helpful
// comments. This is *not* semantic
// information. Do not use this for
// anything other than to convey comment
// information to AsmPrinter.
uint8_t NumMemRefs; // Information on memory references.
mmo_iterator MemRefs;
DebugLoc debugLoc; // Source line information.
MachineInstr(const MachineInstr&) = delete;
void operator=(const MachineInstr&) = delete;
// Use MachineFunction::DeleteMachineInstr() instead.
~MachineInstr() = delete;
// Intrusive list support
friend struct ilist_traits<MachineInstr>;
friend struct ilist_traits<MachineBasicBlock>;
void setParent(MachineBasicBlock *P) { Parent = P; }
/// This constructor creates a copy of the given
/// MachineInstr in the given MachineFunction.
MachineInstr(MachineFunction &, const MachineInstr &);
/// This constructor create a MachineInstr and add the implicit operands.
/// It reserves space for number of operands specified by
/// MCInstrDesc. An explicit DebugLoc is supplied.
MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
bool NoImp = false);
// MachineInstrs are pool-allocated and owned by MachineFunction.
friend class MachineFunction;
public:
const MachineBasicBlock* getParent() const { return Parent; }
MachineBasicBlock* getParent() { return Parent; }
/// Return the asm printer flags bitvector.
uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
/// Clear the AsmPrinter bitvector.
void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
/// Return whether an AsmPrinter flag is set.
bool getAsmPrinterFlag(CommentFlag Flag) const {
return AsmPrinterFlags & Flag;
}
/// Set a flag for the AsmPrinter.
void setAsmPrinterFlag(CommentFlag Flag) {
AsmPrinterFlags |= (uint8_t)Flag;
}
/// Clear specific AsmPrinter flags.
void clearAsmPrinterFlag(CommentFlag Flag) {
AsmPrinterFlags &= ~Flag;
}
/// Return the MI flags bitvector.
uint8_t getFlags() const {
return Flags;
}
/// Return whether an MI flag is set.
bool getFlag(MIFlag Flag) const {
return Flags & Flag;
}
/// Set a MI flag.
void setFlag(MIFlag Flag) {
Flags |= (uint8_t)Flag;
}
void setFlags(unsigned flags) {
// Filter out the automatically maintained flags.
unsigned Mask = BundledPred | BundledSucc;
Flags = (Flags & Mask) | (flags & ~Mask);
}
/// clearFlag - Clear a MI flag.
void clearFlag(MIFlag Flag) {
Flags &= ~((uint8_t)Flag);
}
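// For instance, a target's prologue emitter tags the instructions it creates
// so later consumers can recognize them (a sketch):
//
//   MI->setFlag(MachineInstr::FrameSetup);
//
// and a consumer tests the same flag:
//
//   if (MI->getFlag(MachineInstr::FrameSetup))
//     Prologue.push_back(MI);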
/// Return true if MI is in a bundle (but not the first MI in a bundle).
///
/// A bundle looks like this before it's finalized:
/// ----------------
/// | MI |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// In this case, the first MI starts a bundle but is not inside a bundle; the
/// next two MIs are considered "inside" the bundle.
///
/// After a bundle is finalized, it looks like this:
/// ----------------
/// | Bundle |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// |
/// ----------------
/// | MI * |
/// ----------------
/// The first instruction has the special opcode "BUNDLE". It's not "inside"
/// a bundle, but the next three MIs are.
bool isInsideBundle() const {
return getFlag(BundledPred);
}
/// Return true if this instruction is part of a bundle. This is true
/// if either this instruction or its following instruction is marked "InsideBundle".
bool isBundled() const {
return isBundledWithPred() || isBundledWithSucc();
}
/// Return true if this instruction is part of a bundle, and it is not the
/// first instruction in the bundle.
bool isBundledWithPred() const { return getFlag(BundledPred); }
/// Return true if this instruction is part of a bundle, and it is not the
/// last instruction in the bundle.
bool isBundledWithSucc() const { return getFlag(BundledSucc); }
/// Bundle this instruction with its predecessor. This can be an unbundled
/// instruction, or it can be the first instruction in a bundle.
void bundleWithPred();
/// Bundle this instruction with its successor. This can be an unbundled
/// instruction, or it can be the last instruction in a bundle.
void bundleWithSucc();
/// Break bundle above this instruction.
void unbundleFromPred();
/// Break bundle below this instruction.
void unbundleFromSucc();
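// Illustrative sketch of the bundling API above, assuming `MI` and `Next`
// are adjacent instructions in the same basic block:
//
//   Next->bundleWithPred();   // Attach Next to MI's bundle.
//   assert(MI->isBundledWithSucc() && Next->isBundledWithPred());
//   Next->unbundleFromPred(); // Break the bundle again.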
/// Returns the debug location of this MachineInstr.
const DebugLoc &getDebugLoc() const { return debugLoc; }
/// Return the debug variable referenced by
/// this DBG_VALUE instruction.
const DILocalVariable *getDebugVariable() const {
assert(isDebugValue() && "not a DBG_VALUE");
return cast<DILocalVariable>(getOperand(2).getMetadata());
}
/// Return the complex address expression referenced by
/// this DBG_VALUE instruction.
const DIExpression *getDebugExpression() const {
assert(isDebugValue() && "not a DBG_VALUE");
return cast<DIExpression>(getOperand(3).getMetadata());
}
/// Emit an error referring to the source location of this instruction.
/// This should only be used for inline assembly that is somehow
/// impossible to compile. Other errors should have been handled much
/// earlier.
///
/// If this method returns, the caller should try to recover from the error.
///
void emitError(StringRef Msg) const;
/// Returns the target instruction descriptor of this MachineInstr.
const MCInstrDesc &getDesc() const { return *MCID; }
/// Returns the opcode of this MachineInstr.
unsigned getOpcode() const { return MCID->Opcode; }
/// Access to explicit operands of the instruction.
///
unsigned getNumOperands() const { return NumOperands; }
const MachineOperand& getOperand(unsigned i) const {
assert(i < getNumOperands() && "getOperand() out of range!");
return Operands[i];
}
MachineOperand& getOperand(unsigned i) {
assert(i < getNumOperands() && "getOperand() out of range!");
return Operands[i];
}
/// Returns the number of non-implicit operands.
unsigned getNumExplicitOperands() const;
/// iterator/begin/end - Iterate over all operands of a machine instruction.
typedef MachineOperand *mop_iterator;
typedef const MachineOperand *const_mop_iterator;
mop_iterator operands_begin() { return Operands; }
mop_iterator operands_end() { return Operands + NumOperands; }
const_mop_iterator operands_begin() const { return Operands; }
const_mop_iterator operands_end() const { return Operands + NumOperands; }
iterator_range<mop_iterator> operands() {
return iterator_range<mop_iterator>(operands_begin(), operands_end());
}
iterator_range<const_mop_iterator> operands() const {
return iterator_range<const_mop_iterator>(operands_begin(), operands_end());
}
iterator_range<mop_iterator> explicit_operands() {
return iterator_range<mop_iterator>(
operands_begin(), operands_begin() + getNumExplicitOperands());
}
iterator_range<const_mop_iterator> explicit_operands() const {
return iterator_range<const_mop_iterator>(
operands_begin(), operands_begin() + getNumExplicitOperands());
}
iterator_range<mop_iterator> implicit_operands() {
return iterator_range<mop_iterator>(explicit_operands().end(),
operands_end());
}
iterator_range<const_mop_iterator> implicit_operands() const {
return iterator_range<const_mop_iterator>(explicit_operands().end(),
operands_end());
}
iterator_range<mop_iterator> defs() {
return iterator_range<mop_iterator>(
operands_begin(), operands_begin() + getDesc().getNumDefs());
}
iterator_range<const_mop_iterator> defs() const {
return iterator_range<const_mop_iterator>(
operands_begin(), operands_begin() + getDesc().getNumDefs());
}
iterator_range<mop_iterator> uses() {
return iterator_range<mop_iterator>(
operands_begin() + getDesc().getNumDefs(), operands_end());
}
iterator_range<const_mop_iterator> uses() const {
return iterator_range<const_mop_iterator>(
operands_begin() + getDesc().getNumDefs(), operands_end());
}
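// Illustrative sketch of the operand ranges above, assuming `MI` is an
// in-scope MachineInstr reference: count the dead explicit definitions.
//
//   unsigned DeadDefs = 0;
//   for (const MachineOperand &MO : MI.defs())
//     if (MO.isReg() && MO.isDead())
//       ++DeadDefs;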
/// Returns the number of the operand iterator \p I points to.
unsigned getOperandNo(const_mop_iterator I) const {
return I - operands_begin();
}
/// Access to memory operands of the instruction
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
bool memoperands_empty() const { return NumMemRefs == 0; }
iterator_range<mmo_iterator> memoperands() {
return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end());
}
iterator_range<mmo_iterator> memoperands() const {
return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end());
}
/// Return true if this instruction has exactly one MachineMemOperand.
bool hasOneMemOperand() const {
return NumMemRefs == 1;
}
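// Illustrative sketch, assuming `MI` is in scope: an empty memoperand list
// means the memory reference is unknown, so callers typically treat it as
// the worst case.
//
//   bool MayBeVolatile = MI.memoperands_empty();
//   for (MachineMemOperand *MMO : MI.memoperands())
//     MayBeVolatile |= MMO->isVolatile();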
/// API for querying MachineInstr properties. They are the same as MCInstrDesc
/// queries but they are bundle aware.
enum QueryType {
IgnoreBundle, // Ignore bundles
AnyInBundle, // Return true if any instruction in bundle has property
AllInBundle // Return true if all instructions in bundle have property
};
/// Return true if the instruction (or in the case of a bundle,
/// the instructions inside the bundle) has the specified property.
/// The first argument is the property being queried.
/// The second argument indicates whether the query should look inside
/// instruction bundles.
bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
// Inline the fast path for unbundled or bundle-internal instructions.
if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
return getDesc().getFlags() & (1 << MCFlag);
// If this is the first instruction in a bundle, take the slow path.
return hasPropertyInBundle(1 << MCFlag, Type);
}
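// Illustrative sketch, assuming `MI` is the header of a finalized bundle:
// the same flag can answer differently depending on the query type.
//
//   bool AnyCall = MI.hasProperty(MCID::Call, MachineInstr::AnyInBundle);
//   bool AllPredicable =
//       MI.hasProperty(MCID::Predicable, MachineInstr::AllInBundle);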
/// Return true if this instruction can have a variable number of operands.
/// In this case, the variable operands will be after the normal
/// operands but before the implicit definitions and uses (if any are
/// present).
bool isVariadic(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Variadic, Type);
}
/// Set if this instruction has an optional definition, e.g.
/// ARM instructions which can set condition code if 's' bit is set.
bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::HasOptionalDef, Type);
}
/// Return true if this is a pseudo instruction that doesn't
/// correspond to a real machine instruction.
bool isPseudo(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Pseudo, Type);
}
bool isReturn(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Return, Type);
}
bool isCall(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Call, Type);
}
/// Returns true if the specified instruction stops control flow
/// from executing the instruction immediately following it. Examples include
/// unconditional branches and return instructions.
bool isBarrier(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Barrier, Type);
}
/// Returns true if this instruction is part of the terminator for a basic block.
/// Typically this is things like return and branch instructions.
///
/// Various passes use this to insert code into the bottom of a basic block,
/// but before control flow occurs.
bool isTerminator(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Terminator, Type);
}
/// Returns true if this is a conditional, unconditional, or indirect branch.
/// Predicates below can be used to discriminate between
/// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
/// get more information.
bool isBranch(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Branch, Type);
}
/// Return true if this is an indirect branch, such as a
/// branch through a register.
bool isIndirectBranch(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::IndirectBranch, Type);
}
/// Return true if this is a branch which may fall
/// through to the next instruction or may transfer control flow to some other
/// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
/// information about this branch.
bool isConditionalBranch(QueryType Type = AnyInBundle) const {
return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
}
/// Return true if this is a branch which always
/// transfers control flow to some other block. The
/// TargetInstrInfo::AnalyzeBranch method can be used to get more information
/// about this branch.
bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
}
/// Return true if this instruction has a predicate operand that
/// controls execution. It may be set to 'always', or may be set to other
/// values. There are various methods in TargetInstrInfo that can be used to
/// control and modify the predicate in this instruction.
bool isPredicable(QueryType Type = AllInBundle) const {
// If it's a bundle then all bundled instructions must be predicable for this
// to return true.
return hasProperty(MCID::Predicable, Type);
}
/// Return true if this instruction is a comparison.
bool isCompare(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Compare, Type);
}
/// Return true if this instruction is a move immediate
/// (including conditional moves) instruction.
bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::MoveImm, Type);
}
/// Return true if this instruction is a bitcast instruction.
bool isBitcast(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Bitcast, Type);
}
/// Return true if this instruction is a select instruction.
bool isSelect(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Select, Type);
}
/// Return true if this instruction cannot be safely duplicated.
/// For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
bool isNotDuplicable(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::NotDuplicable, Type);
}
/// Return true if this instruction is convergent.
/// Convergent instructions can only be moved to locations that are
/// control-equivalent to their initial position.
bool isConvergent(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Convergent, Type);
}
/// Returns true if the specified instruction has a delay slot
/// which must be filled by the code generator.
bool hasDelaySlot(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::DelaySlot, Type);
}
/// Return true for instructions that can be folded as
/// memory operands in other instructions. The most common use for this
/// is instructions that are simple loads from memory that don't modify
/// the loaded value in any way, but it can also be used for instructions
/// that can be expressed as constant-pool loads, such as V_SETALLONES
/// on x86, to allow them to be folded when it is beneficial.
/// This should only be set on instructions that return a value in their
/// only virtual register definition.
bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::FoldableAsLoad, Type);
}
/// \brief Return true if this instruction behaves
/// the same way as the generic REG_SEQUENCE instructions.
/// E.g., on ARM,
/// dX VMOVDRR rY, rZ
/// is equivalent to
/// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
/// overridden accordingly.
bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::RegSequence, Type);
}
/// \brief Return true if this instruction behaves
/// the same way as the generic EXTRACT_SUBREG instructions.
/// E.g., on ARM,
/// rX, rY VMOVRRD dZ
/// is equivalent to two EXTRACT_SUBREG:
/// rX = EXTRACT_SUBREG dZ, ssub_0
/// rY = EXTRACT_SUBREG dZ, ssub_1
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
/// overridden accordingly.
bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::ExtractSubreg, Type);
}
/// \brief Return true if this instruction behaves
/// the same way as the generic INSERT_SUBREG instructions.
/// E.g., on ARM,
/// dX = VSETLNi32 dY, rZ, Imm
/// is equivalent to a INSERT_SUBREG:
/// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
///
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
/// overridden accordingly.
bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::InsertSubreg, Type);
}
//===--------------------------------------------------------------------===//
// Side Effect Analysis
//===--------------------------------------------------------------------===//
/// Return true if this instruction could possibly read memory.
/// Instructions with this flag set are not necessarily simple load
/// instructions, they may load a value and modify it, for example.
bool mayLoad(QueryType Type = AnyInBundle) const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_MayLoad)
return true;
}
return hasProperty(MCID::MayLoad, Type);
}
/// Return true if this instruction could possibly modify memory.
/// Instructions with this flag set are not necessarily simple store
/// instructions, they may store a modified value based on their operands, or
/// may not actually modify anything, for example.
bool mayStore(QueryType Type = AnyInBundle) const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_MayStore)
return true;
}
return hasProperty(MCID::MayStore, Type);
}
/// Return true if this instruction could possibly read or modify memory.
bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
return mayLoad(Type) || mayStore(Type);
}
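// Illustrative sketch of a typical hoisting guard, assuming `MI` and an
// AliasAnalysis pointer `AA` are in scope:
//
//   bool SawStore = false;
//   if (!MI.mayLoadOrStore() && MI.isSafeToMove(AA, SawStore)) {
//     // MI neither touches memory nor is otherwise pinned; consider
//     // moving it.
//   }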
//===--------------------------------------------------------------------===//
// Flags that indicate whether an instruction can be modified by a method.
//===--------------------------------------------------------------------===//
/// Return true if this may be a 2- or 3-address
/// instruction (of the form "X = op Y, Z, ..."), which produces the same
/// result if Y and Z are exchanged. If this flag is set, then the
/// TargetInstrInfo::commuteInstruction method may be used to hack on the
/// instruction.
///
/// Note that this flag may be set on instructions that are only commutable
/// sometimes. In these cases, the call to commuteInstruction will fail.
/// Also note that some instructions require non-trivial modification to
/// commute them.
bool isCommutable(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Commutable, Type);
}
/// Return true if this is a 2-address instruction
/// which can be changed into a 3-address instruction if needed. Doing this
/// transformation can be profitable in the register allocator, because it
/// means that the instruction can use a 2-address form if possible, but
/// degrade into a less efficient form if the source and dest register cannot
/// be assigned to the same register. For example, this allows the x86
/// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
/// is the same speed as the shift but has bigger code size.
///
/// If this returns true, then the target must implement the
/// TargetInstrInfo::convertToThreeAddress method for this instruction, which
/// is allowed to fail if the transformation isn't valid for this specific
/// instruction (e.g. shl reg, 4 on x86).
///
bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::ConvertibleTo3Addr, Type);
}
/// Return true if this instruction requires
/// custom insertion support when the DAG scheduler is inserting it into a
/// machine basic block. If this is true for the instruction, it basically
/// means that it is a pseudo instruction used at SelectionDAG time that is
/// expanded out into magic code by the target when MachineInstrs are formed.
///
/// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
/// is used to insert this into the MachineBasicBlock.
bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::UsesCustomInserter, Type);
}
/// Return true if this instruction requires *adjustment*
/// after instruction selection by calling a target hook. For example, this
/// can be used to fill in ARM 's' optional operand depending on whether
/// the conditional flag register is used.
bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::HasPostISelHook, Type);
}
/// Returns true if this instruction is a candidate for rematerialization.
/// This flag is deprecated; please don't use it anymore. If this
/// flag is set, the isReallyTriviallyReMaterializable() method is called to
/// verify that the instruction is really rematerializable.
bool isRematerializable(QueryType Type = AllInBundle) const {
// It's only possible to re-mat a bundle if all bundled instructions are
// re-materializable.
return hasProperty(MCID::Rematerializable, Type);
}
/// Returns true if this instruction has the same cost (or less) than a move
/// instruction. This is useful during certain types of optimizations
/// (e.g., remat during two-address conversion or machine licm)
/// where we would like to remat or hoist the instruction, but not if it costs
/// more than moving the instruction into the appropriate register. Note, we
/// are not marking copies from and to the same register class with this flag.
bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
// Only returns true for a bundle if all bundled instructions are cheap.
return hasProperty(MCID::CheapAsAMove, Type);
}
/// Returns true if this instruction's source operands
/// have special register allocation requirements that are not captured by the
/// operand register classes. e.g. ARM::STRD's two source registers must be an
/// even / odd pair, ARM::STM registers have to be in ascending order.
/// Post-register allocation passes should not attempt to change allocations
/// for sources of instructions with this flag.
bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
}
/// Returns true if this instruction's def operands
/// have special register allocation requirements that are not captured by the
/// operand register classes. e.g. ARM::LDRD's two def registers must be an
/// even / odd pair, ARM::LDM registers have to be in ascending order.
/// Post-register allocation passes should not attempt to change allocations
/// for definitions of instructions with this flag.
bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::ExtraDefRegAllocReq, Type);
}
enum MICheckType {
CheckDefs, // Check all operands for equality
CheckKillDead, // Check all operands including kill / dead markers
IgnoreDefs, // Ignore all definitions
IgnoreVRegDefs // Ignore virtual register definitions
};
/// Return true if this instruction is identical to (same
/// opcode and same operands as) the specified instruction.
bool isIdenticalTo(const MachineInstr *Other,
MICheckType Check = CheckDefs) const;
/// Unlink 'this' from the containing basic block, and return it without
/// deleting it.
///
/// This function can not be used on bundled instructions, use
/// removeFromBundle() to remove individual instructions from a bundle.
MachineInstr *removeFromParent();
/// Unlink this instruction from its basic block and return it without
/// deleting it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle remain bundled.
MachineInstr *removeFromBundle();
/// Unlink 'this' from the containing basic block and delete it.
///
/// If this instruction is the header of a bundle, the whole bundle is erased.
/// This function can not be used for instructions inside a bundle, use
/// eraseFromBundle() to erase individual bundled instructions.
void eraseFromParent();
/// Unlink 'this' from the containing basic block and delete it.
///
/// For all definitions mark their uses in DBG_VALUE nodes
/// as undefined. Otherwise like eraseFromParent().
void eraseFromParentAndMarkDBGValuesForRemoval();
/// Unlink 'this' from its basic block and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle remain bundled.
void eraseFromBundle();
bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
/// Returns true if the MachineInstr represents a label.
bool isLabel() const { return isEHLabel() || isGCLabel(); }
bool isCFIInstruction() const {
return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
}
// True if the instruction represents a position in the function.
bool isPosition() const { return isLabel() || isCFIInstruction(); }
bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
/// A DBG_VALUE is indirect iff the first operand is a register and
/// the second operand is an immediate.
bool isIndirectDebugValue() const {
return isDebugValue()
&& getOperand(0).isReg()
&& getOperand(1).isImm();
}
bool isPHI() const { return getOpcode() == TargetOpcode::PHI; }
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
bool isMSInlineAsm() const {
return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
}
bool isStackAligningInlineAsm() const;
InlineAsm::AsmDialect getInlineAsmDialect() const;
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
bool isSubregToReg() const {
return getOpcode() == TargetOpcode::SUBREG_TO_REG;
}
bool isRegSequence() const {
return getOpcode() == TargetOpcode::REG_SEQUENCE;
}
bool isBundle() const {
return getOpcode() == TargetOpcode::BUNDLE;
}
bool isCopy() const {
return getOpcode() == TargetOpcode::COPY;
}
bool isFullCopy() const {
return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
}
bool isExtractSubreg() const {
return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
}
/// Return true if the instruction behaves like a copy.
/// This does not include native copy instructions.
bool isCopyLike() const {
return isCopy() || isSubregToReg();
}
/// Return true if the instruction is an identity copy.
bool isIdentityCopy() const {
return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
/// Return true if this is a transient instruction that is
/// either very likely to be eliminated during register allocation (such as
/// copy-like instructions), or if this instruction doesn't have an
/// execution-time cost.
bool isTransient() const {
switch(getOpcode()) {
default: return false;
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
// Pseudo-instructions that don't produce any real output.
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
case TargetOpcode::CFI_INSTRUCTION:
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::DBG_VALUE:
return true;
}
}
/// Return the number of instructions inside the MI bundle, excluding the
/// bundle header.
///
/// This is the number of instructions that MachineBasicBlock::iterator
/// skips, 0 for unbundled instructions.
unsigned getBundleSize() const;
/// Return true if the MachineInstr reads the specified register.
/// If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
/// This does not count partial redefines of virtual registers as reads:
/// %reg1024:6 = OP.
bool readsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
}
/// Return true if the MachineInstr reads the specified virtual register.
/// Take into account that a partial define is a
/// read-modify-write operation.
bool readsVirtualRegister(unsigned Reg) const {
return readsWritesVirtualRegister(Reg).first;
}
/// Return a pair of bools (reads, writes) indicating if this instruction
/// reads or writes Reg. This also considers partial defines.
/// If Ops is not null, all operand indices for Reg are added.
std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
SmallVectorImpl<unsigned> *Ops = nullptr) const;
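// Illustrative sketch, assuming `MI` and a virtual register `Reg` are in
// scope: distinguish a read-modify-write from a plain write.
//
//   bool Reads, Writes;
//   std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);
//   if (Reads && Writes) {
//     // Partial redefinition: Reg is both read and written by MI.
//   }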
/// Return true if the MachineInstr kills the specified register.
/// If TargetRegisterInfo is passed, then it also checks if there is
/// a kill of a super-register.
bool killsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
}
/// Return true if the MachineInstr fully defines the specified register.
/// If TargetRegisterInfo is passed, then it also checks
/// if there is a def of a super-register.
/// NOTE: This ignores subreg indices on virtual registers.
bool definesRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
}
/// Return true if the MachineInstr modifies (fully define or partially
/// define) the specified register.
/// NOTE: This ignores subreg indices on virtual registers.
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
}
/// Returns true if the register is dead in this machine instruction.
/// If TargetRegisterInfo is passed, then it also checks
/// if there is a dead def of a super-register.
bool registerDefIsDead(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
}
/// Returns the operand index that is a use of the specified register or -1
/// if it is not found. It further tightens the search criteria to a use
/// that kills the register if isKill is true.
int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = nullptr) const;
/// Wrapper for findRegisterUseOperandIdx, returning
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
/// Returns the operand index that is a def of the specified register or
/// -1 if it is not found. If isDead is true, defs that are not dead are
/// skipped. If Overlap is true, then it also looks for defs that merely
/// overlap the specified register. If TargetRegisterInfo is non-null,
/// then it also checks if there is a def of a super-register.
/// This may also return a register mask operand when Overlap is true.
int findRegisterDefOperandIdx(unsigned Reg,
bool isDead = false, bool Overlap = false,
const TargetRegisterInfo *TRI = nullptr) const;
/// Wrapper for findRegisterDefOperandIdx, returning
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
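// Illustrative sketch, assuming `MI`, `Reg`, and `TRI` are in scope: mark
// the def of Reg dead once it is known to be unused.
//
//   if (MachineOperand *MO = MI.findRegisterDefOperand(Reg, false, TRI))
//     MO->setIsDead();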
/// Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int findFirstPredOperandIdx() const;
/// Find the index of the flag word operand that
/// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
/// getOperand(OpIdx) does not belong to an inline asm operand group.
///
/// If GroupNo is not NULL, it will receive the number of the operand group
/// containing OpIdx.
///
/// The flag operand is an immediate that can be decoded with methods like
/// InlineAsm::hasRegClassConstraint().
///
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
/// Compute the static register class constraint for operand OpIdx.
/// For normal instructions, this is derived from the MCInstrDesc.
/// For inline assembly it is derived from the flag words.
///
/// Returns NULL if the static register class constraint cannot be
/// determined.
///
const TargetRegisterClass*
getRegClassConstraint(unsigned OpIdx,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
/// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
/// the given \p CurRC.
/// If \p ExploreBundle is set and MI is part of a bundle, all the
/// instructions inside the bundle will be taken into account. In other words,
/// this method accumulates all the constraints of the operands of this MI and
/// the related bundle if MI is a bundle or inside a bundle.
///
/// Returns the register class that satisfies both \p CurRC and the
/// constraints set by MI. Returns NULL if such a register class does not
/// exist.
///
/// \pre CurRC must not be NULL.
const TargetRegisterClass *getRegClassConstraintEffectForVReg(
unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
bool ExploreBundle = false) const;
/// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
/// to the given \p CurRC.
///
/// Returns the register class that satisfies both \p CurRC and the
/// constraints set by operand \p OpIdx of this MI. Returns NULL if such a
/// register class does not exist.
///
/// \pre CurRC must not be NULL.
/// \pre The operand at \p OpIdx must be a register.
const TargetRegisterClass *
getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
/// Add a tie between the register operands at DefIdx and UseIdx.
/// The tie will cause the register allocator to ensure that the two
/// operands are assigned the same physical register.
///
/// Tied operands are managed automatically for explicit operands in the
/// MCInstrDesc. This method is for exceptional cases like inline asm.
void tieOperands(unsigned DefIdx, unsigned UseIdx);
/// Given the index of a tied register operand, find the
/// operand it is tied to. Defs are tied to uses and vice versa. Returns the
/// index of the tied operand which must exist.
unsigned findTiedOperandIdx(unsigned OpIdx) const;
/// Given the index of a register def operand,
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
bool isRegTiedToUseOperand(unsigned DefOpIdx,
unsigned *UseOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || !MO.isTied())
return false;
if (UseOpIdx)
*UseOpIdx = findTiedOperandIdx(DefOpIdx);
return true;
}
/// Return true if the use operand of the specified index is tied to a def
/// operand. It also returns the def operand index by reference if DefOpIdx
/// is not null.
bool isRegTiedToDefOperand(unsigned UseOpIdx,
unsigned *DefOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse() || !MO.isTied())
return false;
if (DefOpIdx)
*DefOpIdx = findTiedOperandIdx(UseOpIdx);
return true;
}
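// Illustrative sketch, assuming `MI` and a def operand index `DefIdx` are
// in scope: walk from a tied def to its use, as two-address passes do.
//
//   unsigned UseIdx;
//   if (MI.isRegTiedToUseOperand(DefIdx, &UseIdx)) {
//     MachineOperand &TiedUse = MI.getOperand(UseIdx);
//     // TiedUse must be allocated to the same register as the def.
//   }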
/// Clears kill flags on all operands.
void clearKillInfo();
/// Replace all occurrences of FromReg with ToReg:SubIdx,
/// properly composing subreg indices where necessary.
void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
const TargetRegisterInfo &RegInfo);
/// We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
/// add an implicit operand if it's not found. Returns true if the operand
/// exists / is added.
bool addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound = false);
/// Clear all kill flags affecting Reg. If RegInfo is
/// provided, this includes super-register kills.
void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
/// We have determined that MI defines a register without a use.
/// Look for the operand that defines it and mark it as IsDead. If
/// AddIfNotFound is true, add an implicit operand if it's not found. Returns
/// true if the operand exists / is added.
bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
bool AddIfNotFound = false);
/// Clear all dead flags on operands defining register @p Reg.
void clearRegisterDeads(unsigned Reg);
/// Mark all subregister defs of register @p Reg with the undef flag.
/// This function is used when we have determined that there is a subregister
/// def in an otherwise undefined super register.
void addRegisterDefReadUndef(unsigned Reg);
/// We have determined MI defines a register. Make sure there is an operand
/// defining Reg.
void addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo = nullptr);
/// Mark every physreg used by this instruction as
/// dead except those in the UsedRegs list.
///
/// On instructions with register mask operands, also add implicit-def
/// operands for all registers in UsedRegs.
void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI);
/// Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
/// Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no
/// ordered or volatile memory references.
bool hasOrderedMemoryRef() const;
/// Return true if this instruction is loading from a
/// location whose value is invariant across the function. For example,
/// loading a value from the constant pool or from the argument area of
/// a function if it does not change. This should only return true if *all*
/// loads the instruction does are invariant (if it does multiple loads).
bool isInvariantLoad(AliasAnalysis *AA) const;
/// If the specified instruction is a PHI that always merges together the
/// same virtual register, return the register, otherwise return 0.
unsigned isConstantValuePHI() const;
/// Return true if this instruction has side effects that are not modeled
/// by mayLoad / mayStore, etc.
/// For all instructions, the property is encoded in MCInstrDesc::Flags
/// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
/// INLINEASM instruction, in which case the side effect property is encoded
/// in one of its operands (see InlineAsm::Extra_HasSideEffect).
///
bool hasUnmodeledSideEffects() const;
/// Return true if all the defs of this instruction are dead.
bool allDefsAreDead() const;
/// Copy implicit register operands from specified
/// instruction to this instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr *MI);
//
// Debugging support
//
void print(raw_ostream &OS, bool SkipOpers = false) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST,
bool SkipOpers = false) const;
void dump() const;
//===--------------------------------------------------------------------===//
// Accessors used to build up machine instructions.
/// Add the specified operand to the instruction. If it is an implicit
/// operand, it is added to the end of the operand list. If it is an
/// explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
///
/// MF must be the machine function that was used to allocate this
/// instruction.
///
/// MachineInstrBuilder provides a more convenient interface for creating
/// instructions and adding operands.
void addOperand(MachineFunction &MF, const MachineOperand &Op);
/// Add an operand without providing an MF reference. This only works for
/// instructions that are inserted in a basic block.
///
/// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
/// preferred.
void addOperand(const MachineOperand &Op);
/// Replace the instruction descriptor (thus opcode) of
/// the current instruction with a new one.
void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
/// Replace the current debug location with the given one.
/// Avoid using this; the constructor argument is preferable.
void setDebugLoc(DebugLoc dl) {
debugLoc = std::move(dl);
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
}
/// Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
void RemoveOperand(unsigned i);
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
/// Assign this MachineInstr's memory reference descriptor list.
/// This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
MemRefs = NewMemRefs;
NumMemRefs = uint8_t(NewMemRefsEnd - NewMemRefs);
assert(NumMemRefs == NewMemRefsEnd - NewMemRefs && "Too many memrefs");
}
/// Clear this MachineInstr's memory reference descriptor list.
void clearMemRefs() {
MemRefs = nullptr;
NumMemRefs = 0;
}
/// Break any tie involving OpIdx.
void untieRegOperand(unsigned OpIdx) {
MachineOperand &MO = getOperand(OpIdx);
if (MO.isReg() && MO.isTied()) {
getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
MO.TiedTo = 0;
}
}
private:
/// If this instruction is embedded into a MachineFunction, return the
/// MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *getRegInfo();
/// Add all implicit def and use operands to this instruction.
void addImplicitDefUseOperands(MachineFunction &MF);
/// Unlink all of the register operands in this instruction from their
/// respective use lists. This requires that the operands already be on their
/// use lists.
void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
/// Add all of the register operands in this instruction to their
/// respective use lists. This requires that the operands not be on their
/// use lists yet.
void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// Slow path for hasProperty when we're dealing with a bundle.
bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
/// \brief Implements the logic of getRegClassConstraintEffectForVReg for
/// this MI and the given operand index \p OpIdx.
/// If the related operand does not constrain Reg, this returns CurRC.
const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
};
/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
/// instruction rather than by pointer value.
/// The hashing and equality testing functions ignore definitions so this is
/// useful for CSE, etc.
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
static inline MachineInstr *getEmptyKey() {
return nullptr;
}
static inline MachineInstr *getTombstoneKey() {
return reinterpret_cast<MachineInstr*>(-1);
}
static unsigned getHashValue(const MachineInstr* const &MI);
static bool isEqual(const MachineInstr* const &LHS,
const MachineInstr* const &RHS) {
if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
LHS == getEmptyKey() || LHS == getTombstoneKey())
return LHS == RHS;
return LHS->isIdenticalTo(RHS, MachineInstr::IgnoreVRegDefs);
}
};
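// Illustrative sketch: keying a DenseMap by instruction *value* with the
// trait above, as a CSE-style pass might (`MI` and `Counter` are assumed to
// be in scope).
//
//   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> Seen;
//   if (!Seen.count(MI))          // Compares opcode/operands, not pointers,
//     Seen[MI] = ++Counter;       // and ignores definitions.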
//===----------------------------------------------------------------------===//
// Debugging Support
//===----------------------------------------------------------------------===//
inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
MI.print(OS);
return OS;
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RegisterScavenging.h | //===-- RegisterScavenging.h - Machine register scavenging ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the machine register scavenger class. It can provide
// information such as unused registers at any point in a machine basic block.
// It also provides a mechanism to make registers available by evicting them
// to spill slots.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
#define LLVM_CODEGEN_REGISTERSCAVENGING_H
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
namespace llvm {
class MachineRegisterInfo;
class TargetRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;
class RegScavenger {
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
MachineRegisterInfo* MRI;
MachineBasicBlock *MBB;
MachineBasicBlock::iterator MBBI;
unsigned NumRegUnits;
/// True if RegScavenger is currently tracking the liveness of registers.
bool Tracking;
/// Information on scavenged registers (held in a spill slot).
struct ScavengedInfo {
ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(nullptr) {}
/// A spill slot used for scavenging a register post register allocation.
int FrameIndex;
/// If non-zero, the specific register is currently being
/// scavenged. That is, it is spilled to this scavenging stack slot.
unsigned Reg;
/// The instruction that restores the scavenged register from stack.
const MachineInstr *Restore;
};
/// A vector of information on scavenged registers.
SmallVector<ScavengedInfo, 2> Scavenged;
/// The current state of each reg unit immediately before MBBI.
/// One bit per register unit. If a bit is not set, it means some
/// register containing that register unit is currently being used.
BitVector RegUnitsAvailable;
// These BitVectors are only used internally to forward(). They are members
// to avoid frequent reallocations.
BitVector KillRegUnits, DefRegUnits;
BitVector TmpRegUnits;
public:
RegScavenger()
: MBB(nullptr), NumRegUnits(0), Tracking(false) {}
/// Start tracking liveness from the beginning of the specified basic block.
void enterBasicBlock(MachineBasicBlock *mbb);
/// Allow resetting register state info for multiple
/// passes over/within the same function.
void initRegState();
/// Move the internal MBB iterator and update register states.
void forward();
/// Move the internal MBB iterator and update register states until
/// it has processed the specified iterator.
void forward(MachineBasicBlock::iterator I) {
if (!Tracking && MBB->begin() != I) forward();
while (MBBI != I) forward();
}
/// Invert the behavior of forward() on the current instruction (undo the
/// changes to the available registers made by forward()).
void unprocess();
/// Unprocess instructions until you reach the provided iterator.
void unprocess(MachineBasicBlock::iterator I) {
while (MBBI != I) unprocess();
}
/// Move the internal MBB iterator but do not update register states.
void skipTo(MachineBasicBlock::iterator I) {
if (I == MachineBasicBlock::iterator(nullptr))
Tracking = false;
MBBI = I;
}
MachineBasicBlock::iterator getCurrentPosition() const {
return MBBI;
}
/// Return whether the specified register is currently in use.
bool isRegUsed(unsigned Reg, bool includeReserved = true) const;
/// Return a bit vector of the available registers in the given register class.
BitVector getRegsAvailable(const TargetRegisterClass *RC);
/// Find an unused register of the specified register class.
/// Return 0 if none is found.
unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
/// Add a scavenging frame index.
void addScavengingFrameIndex(int FI) {
Scavenged.push_back(ScavengedInfo(FI));
}
/// Query whether a frame index is a scavenging frame index.
bool isScavengingFrameIndex(int FI) const {
for (SmallVectorImpl<ScavengedInfo>::const_iterator I = Scavenged.begin(),
IE = Scavenged.end(); I != IE; ++I)
if (I->FrameIndex == FI)
return true;
return false;
}
/// Get an array of scavenging frame indices.
void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
for (SmallVectorImpl<ScavengedInfo>::const_iterator I = Scavenged.begin(),
IE = Scavenged.end(); I != IE; ++I)
if (I->FrameIndex >= 0)
A.push_back(I->FrameIndex);
}
/// Make a register of the specified register class
/// available and do the appropriate bookkeeping. SPAdj is the stack
/// adjustment due to the call frame; it is passed along to
/// eliminateFrameIndex().
/// Returns the scavenged register.
unsigned scavengeRegister(const TargetRegisterClass *RegClass,
MachineBasicBlock::iterator I, int SPAdj);
unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
return scavengeRegister(RegClass, MBBI, SPAdj);
}
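// Illustrative sketch of use during frame-index elimination, assuming `RS`
// is a RegScavenger positioned in the right block, `I` is the iterator
// needing a temporary, and `RC` names a target register class (both
// hypothetical here):
//
//   RS->forward(I);                        // Advance liveness to I.
//   unsigned Tmp = RS->FindUnusedReg(RC);  // Try a free register first.
//   if (!Tmp)
//     Tmp = RS->scavengeRegister(RC, I, /*SPAdj=*/0); // Spill to get one.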
/// Tell the scavenger a register is used.
void setRegUsed(unsigned Reg);
private:
/// Returns true if a register is reserved. A reserved register is never "unused".
bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
/// setUsed / setUnused - Mark the state of one or a number of register units.
///
void setUsed(BitVector &RegUnits) {
RegUnitsAvailable.reset(RegUnits);
}
void setUnused(BitVector &RegUnits) {
RegUnitsAvailable |= RegUnits;
}
/// Processes the current instruction and fills the KillRegUnits and
/// DefRegUnits bit vectors.
void determineKillsAndDefs();
/// Add all Reg Units that Reg contains to BV.
void addRegUnits(BitVector &BV, unsigned Reg);
/// Return the candidate register that is unused for the longest after
/// StartMI. UseMI is set to the instruction where the search stopped.
///
/// No more than InstrLimit instructions are inspected.
unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI);
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/ValueTypes.h | //===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of low-level target-independent types used to
// describe values in the code generator. This allows the target-specific
// behavior of instructions to be described to target-independent passes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_VALUETYPES_H
#define LLVM_CODEGEN_VALUETYPES_H
#include "llvm/CodeGen/MachineValueType.h"
#include <cassert>
#include <string>
namespace llvm {
class LLVMContext;
class Type;
/// EVT - Extended Value Type. Capable of holding value types which are not
/// native for any processor (such as the i12345 type), as well as the types
/// an MVT can represent.
struct EVT {
private:
MVT V;
Type *LLVMTy;
public:
LLVM_CONSTEXPR EVT() : V(MVT::INVALID_SIMPLE_VALUE_TYPE), LLVMTy(nullptr) {}
LLVM_CONSTEXPR EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(nullptr) {}
LLVM_CONSTEXPR EVT(MVT S) : V(S), LLVMTy(nullptr) {}
bool operator==(EVT VT) const {
return !(*this != VT);
}
bool operator!=(EVT VT) const {
if (V.SimpleTy != VT.V.SimpleTy)
return true;
if (V.SimpleTy < 0)
return LLVMTy != VT.LLVMTy;
return false;
}
/// getFloatingPointVT - Returns the EVT that represents a floating point
/// type with the given number of bits. There are two floating point types
/// with 128 bits - this returns f128 rather than ppcf128.
static EVT getFloatingPointVT(unsigned BitWidth) {
return MVT::getFloatingPointVT(BitWidth);
}
/// getIntegerVT - Returns the EVT that represents an integer with the given
/// number of bits.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
MVT M = MVT::getIntegerVT(BitWidth);
if (M.SimpleTy >= 0)
return M;
return getExtendedIntegerVT(Context, BitWidth);
}
/// getVectorVT - Returns the EVT that represents a vector NumElements in
/// length, where each element is of type VT.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) {
MVT M = MVT::getVectorVT(VT.V, NumElements);
if (M.SimpleTy >= 0)
return M;
return getExtendedVectorVT(Context, VT, NumElements);
}
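// Illustrative sketch, assuming an LLVMContext `Ctx` is in scope: common
// widths resolve to simple MVTs, while odd widths become extended EVTs.
//
//   EVT I32 = EVT::getIntegerVT(Ctx, 32);         // Simple: MVT::i32.
//   EVT I17 = EVT::getIntegerVT(Ctx, 17);         // Extended: no MVT fits.
//   EVT V4F32 = EVT::getVectorVT(Ctx, MVT::f32, 4);
//   assert(I32.isSimple() && I17.isExtended());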
/// changeVectorElementTypeToInteger - Return a vector with the same number
/// of elements as this vector, but with the element type converted to an
/// integer type with the same bitwidth.
EVT changeVectorElementTypeToInteger() const {
if (!isSimple())
return changeExtendedVectorElementTypeToInteger();
MVT EltTy = getSimpleVT().getVectorElementType();
unsigned BitWidth = EltTy.getSizeInBits();
MVT IntTy = MVT::getIntegerVT(BitWidth);
MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements());
assert(VecTy.SimpleTy >= 0 &&
"Simple vector VT not representable by simple integer vector VT!");
return VecTy;
}
/// isSimple - Test if the given EVT is simple (as opposed to being
/// extended).
bool isSimple() const {
return V.SimpleTy >= 0;
}
/// isExtended - Test if the given EVT is extended (as opposed to
/// being simple).
bool isExtended() const {
return !isSimple();
}
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return isSimple() ? V.isInteger() : isExtendedInteger();
}
/// isVector - Return true if this is a vector value type.
bool isVector() const {
return isSimple() ? V.isVector() : isExtendedVector();
}
/// is16BitVector - Return true if this is a 16-bit vector type.
bool is16BitVector() const {
return isSimple() ? V.is16BitVector() : isExtended16BitVector();
}
/// is32BitVector - Return true if this is a 32-bit vector type.
bool is32BitVector() const {
return isSimple() ? V.is32BitVector() : isExtended32BitVector();
}
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return isSimple() ? V.is64BitVector() : isExtended64BitVector();
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
return isSimple() ? V.is128BitVector() : isExtended128BitVector();
}
/// is256BitVector - Return true if this is a 256-bit vector type.
bool is256BitVector() const {
return isSimple() ? V.is256BitVector() : isExtended256BitVector();
}
/// is512BitVector - Return true if this is a 512-bit vector type.
bool is512BitVector() const {
return isSimple() ? V.is512BitVector() : isExtended512BitVector();
}
/// is1024BitVector - Return true if this is a 1024-bit vector type.
bool is1024BitVector() const {
return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
bool isOverloaded() const {
return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
}
/// isByteSized - Return true if the bit size is a multiple of 8.
bool isByteSized() const {
return (getSizeInBits() & 7) == 0;
}
/// isRound - Return true if the size is a power-of-two number of bytes.
bool isRound() const {
unsigned BitSize = getSizeInBits();
return BitSize >= 8 && !(BitSize & (BitSize - 1));
}
/// bitsEq - Return true if this has the same number of bits as VT.
bool bitsEq(EVT VT) const {
if (EVT::operator==(VT)) return true;
return getSizeInBits() == VT.getSizeInBits();
}
/// bitsGT - Return true if this has more bits than VT.
bool bitsGT(EVT VT) const {
if (EVT::operator==(VT)) return false;
return getSizeInBits() > VT.getSizeInBits();
}
/// bitsGE - Return true if this has no less bits than VT.
bool bitsGE(EVT VT) const {
if (EVT::operator==(VT)) return true;
return getSizeInBits() >= VT.getSizeInBits();
}
/// bitsLT - Return true if this has less bits than VT.
bool bitsLT(EVT VT) const {
if (EVT::operator==(VT)) return false;
return getSizeInBits() < VT.getSizeInBits();
}
/// bitsLE - Return true if this has no more bits than VT.
bool bitsLE(EVT VT) const {
if (EVT::operator==(VT)) return true;
return getSizeInBits() <= VT.getSizeInBits();
}
/// getSimpleVT - Return the SimpleValueType held in the specified
/// simple EVT.
MVT getSimpleVT() const {
assert(isSimple() && "Expected a SimpleValueType!");
return V;
}
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return this.
EVT getScalarType() const {
return isVector() ? getVectorElementType() : *this;
}
/// getVectorElementType - Given a vector type, return the type of
/// each element.
EVT getVectorElementType() const {
assert(isVector() && "Invalid vector type!");
if (isSimple())
return V.getVectorElementType();
return getExtendedVectorElementType();
}
/// getVectorNumElements - Given a vector type, return the number of
/// elements it contains.
unsigned getVectorNumElements() const {
assert(isVector() && "Invalid vector type!");
if (isSimple())
return V.getVectorNumElements();
return getExtendedVectorNumElements();
}
/// getSizeInBits - Return the size of the specified value type in bits.
unsigned getSizeInBits() const {
if (isSimple())
return V.getSizeInBits();
return getExtendedSizeInBits();
}
unsigned getScalarSizeInBits() const {
return getScalarType().getSizeInBits();
}
/// getStoreSize - Return the number of bytes overwritten by a store
/// of the specified value type.
unsigned getStoreSize() const {
return (getSizeInBits() + 7) / 8;
}
/// getStoreSizeInBits - Return the number of bits overwritten by a store
/// of the specified value type.
unsigned getStoreSizeInBits() const {
return getStoreSize() * 8;
}
/// getRoundIntegerType - Rounds the bit-width of the given integer EVT up
/// to the nearest power of two (and at least to eight), and returns the
/// integer EVT with that number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const {
assert(isInteger() && !isVector() && "Invalid integer type!");
unsigned BitWidth = getSizeInBits();
if (BitWidth <= 8)
return EVT(MVT::i8);
return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
}
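// Illustrative sketch, assuming `Ctx` is an in-scope LLVMContext: an
// odd-width integer rounds up to the next power of two.
//
//   EVT I17 = EVT::getIntegerVT(Ctx, 17);
//   assert(I17.getRoundIntegerType(Ctx) == EVT(MVT::i32));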
/// getHalfSizedIntegerVT - Finds the smallest simple value type that is
/// greater than or equal to half the width of this EVT. If no simple
/// value type can be found, an extended integer value type of half the
/// size (rounded up) is returned.
EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
assert(isInteger() && !isVector() && "Invalid integer type!");
unsigned EVTSize = getSizeInBits();
for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
if (HalfVT.getSizeInBits() * 2 >= EVTSize)
return HalfVT;
}
return getIntegerVT(Context, (EVTSize + 1) / 2);
}
/// \brief Return a VT for an integer vector type with the size of the
/// elements doubled. The type returned may be an extended type.
EVT widenIntegerVectorElementType(LLVMContext &Context) const {
EVT EltVT = getVectorElementType();
EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
return EVT::getVectorVT(Context, EltVT, getVectorNumElements());
}
/// isPow2VectorType - Returns true if the number of elements in this
/// vector type is a power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorNumElements();
return !(NElts & (NElts - 1));
}
/// getPow2VectorType - Widens the length of the given vector EVT up to
/// the nearest power of 2 and returns that type.
EVT getPow2VectorType(LLVMContext &Context) const {
if (!isPow2VectorType()) {
unsigned NElts = getVectorNumElements();
unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts);
}
else {
return *this;
}
}
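// Example (illustrative; assumes an LLVMContext 'Ctx' is available):
//   EVT V3 = EVT::getVectorVT(Ctx, MVT::i32, 3); // v3i32
//   V3.isPow2VectorType();                  // false: 3 is not a power of 2
//   EVT V4 = V3.getPow2VectorType(Ctx);     // widened to v4i32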
/// getEVTString - This function returns the value type as a string,
/// e.g. "i32".
std::string getEVTString() const;
/// getTypeForEVT - This method returns an LLVM type corresponding to the
/// specified EVT. For integer types, this returns an unsigned type. Note
/// that this will abort for types that cannot be represented.
Type *getTypeForEVT(LLVMContext &Context) const;
/// getEVT - Return the value type corresponding to the specified type.
/// This returns all pointers as iPTR. If HandleUnknown is true, unknown
/// types are returned as Other, otherwise they are invalid.
static EVT getEVT(Type *Ty, bool HandleUnknown = false);
intptr_t getRawBits() const {
if (isSimple())
return V.SimpleTy;
else
return (intptr_t)(LLVMTy);
}
/// compareRawBits - A meaningless but well-behaved order, useful for
/// constructing containers.
struct compareRawBits {
bool operator()(EVT L, EVT R) const {
if (L.V.SimpleTy == R.V.SimpleTy)
return L.LLVMTy < R.LLVMTy;
else
return L.V.SimpleTy < R.V.SimpleTy;
}
};
private:
// Methods for handling the Extended-type case in functions above.
// These are all out-of-line to prevent users of this header file
// from having a dependency on Type.h.
EVT changeExtendedVectorElementTypeToInteger() const;
static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
unsigned NumElements);
bool isExtendedFloatingPoint() const LLVM_READONLY;
bool isExtendedInteger() const LLVM_READONLY;
bool isExtendedVector() const LLVM_READONLY;
bool isExtended16BitVector() const LLVM_READONLY;
bool isExtended32BitVector() const LLVM_READONLY;
bool isExtended64BitVector() const LLVM_READONLY;
bool isExtended128BitVector() const LLVM_READONLY;
bool isExtended256BitVector() const LLVM_READONLY;
bool isExtended512BitVector() const LLVM_READONLY;
bool isExtended1024BitVector() const LLVM_READONLY;
EVT getExtendedVectorElementType() const;
unsigned getExtendedVectorNumElements() const LLVM_READONLY;
unsigned getExtendedSizeInBits() const;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/StackMaps.h | //===------------------- StackMaps.h - StackMaps ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_STACKMAPS_H
#define LLVM_CODEGEN_STACKMAPS_H
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <vector>
namespace llvm {
class AsmPrinter;
class MCExpr;
class MCStreamer;
/// \brief MI-level patchpoint operands.
///
/// MI patchpoint operations take the form:
/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
///
/// IR patchpoint intrinsics do not have the <cc> operand because calling
/// convention is part of the subclass data.
///
/// SD patchpoint nodes do not have a def operand because it is part of the
/// SDValue.
///
/// Patchpoints following the anyregcc convention are handled specially. For
/// these, the stack map also records the location of the return value and
/// arguments.
class PatchPointOpers {
public:
/// Enumerate the meta operands.
enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd };
private:
const MachineInstr *MI;
bool HasDef;
bool IsAnyReg;
public:
explicit PatchPointOpers(const MachineInstr *MI);
bool isAnyReg() const { return IsAnyReg; }
bool hasDef() const { return HasDef; }
unsigned getMetaIdx(unsigned Pos = 0) const {
assert(Pos < MetaEnd && "Meta operand index out of range.");
return (HasDef ? 1 : 0) + Pos;
}
const MachineOperand &getMetaOper(unsigned Pos) {
return MI->getOperand(getMetaIdx(Pos));
}
unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }
/// Get the operand index of the variable list of non-argument operands.
/// These hold the "live state".
unsigned getVarIdx() const {
return getMetaIdx() + MetaEnd +
MI->getOperand(getMetaIdx(NArgPos)).getImm();
}
/// Get the index at which stack map locations will be recorded.
/// Arguments are not recorded unless the anyregcc convention is used.
unsigned getStackMapStartIdx() const {
if (IsAnyReg)
return getArgIdx();
return getVarIdx();
}
/// \brief Get the next scratch register operand index.
unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
};
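// Example (illustrative sketch, not part of the original header): reading
// the <id> and <numBytes> meta operands of a PATCHPOINT instruction 'MI'.
//   PatchPointOpers Opers(&MI);
//   int64_t ID = Opers.getMetaOper(PatchPointOpers::IDPos).getImm();
//   int64_t NBytes = Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
//   unsigned VarIdx = Opers.getVarIdx(); // first "live state" operand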
/// MI-level Statepoint operands
///
/// Statepoint operands take the form:
/// <id>, <num patch bytes >, <num call arguments>, <call target>,
/// [call arguments], <StackMaps::ConstantOp>, <calling convention>,
/// <StackMaps::ConstantOp>, <statepoint flags>,
/// <StackMaps::ConstantOp>, <num other args>, [other args],
/// [gc values]
class StatepointOpers {
private:
// These values are absolute offsets into the operands of the statepoint
// instruction.
enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };
// These values are relative offsets from the start of the statepoint meta
// arguments (i.e. the end of the call arguments).
enum { CCOffset = 1, FlagsOffset = 3, NumVMSArgsOffset = 5 };
public:
explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {}
/// Get the starting index of the non-call-related arguments
/// (calling convention, statepoint flags, vm state and gc state).
unsigned getVarIdx() const {
return MI->getOperand(NCallArgsPos).getImm() + MetaEnd;
}
/// Return the ID for the given statepoint.
uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
/// Return the number of patchable bytes the given statepoint should emit.
uint32_t getNumPatchBytes() const {
return MI->getOperand(NBytesPos).getImm();
}
/// Returns the target of the underlying call.
const MachineOperand &getCallTarget() const {
return MI->getOperand(CallTargetPos);
}
private:
const MachineInstr *MI;
};
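// Example (illustrative): pulling the fixed fields out of a STATEPOINT
// instruction 'MI' with the accessors above.
//   StatepointOpers Opers(&MI);
//   uint64_t ID = Opers.getID();
//   uint32_t PatchBytes = Opers.getNumPatchBytes();
//   const MachineOperand &Target = Opers.getCallTarget();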
class StackMaps {
public:
struct Location {
enum LocationType {
Unprocessed,
Register,
Direct,
Indirect,
Constant,
ConstantIndex
};
LocationType Type;
unsigned Size;
unsigned Reg;
int64_t Offset;
Location() : Type(Unprocessed), Size(0), Reg(0), Offset(0) {}
Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset)
: Type(Type), Size(Size), Reg(Reg), Offset(Offset) {}
};
struct LiveOutReg {
unsigned short Reg;
unsigned short DwarfRegNum;
unsigned short Size;
LiveOutReg() : Reg(0), DwarfRegNum(0), Size(0) {}
LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum,
unsigned short Size)
: Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {}
};
// OpTypes are used to encode information about the following logical
// operand (which may consist of several MachineOperands) for the
// OpParser.
typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
StackMaps(AsmPrinter &AP);
void reset() {
CSInfos.clear();
ConstPool.clear();
FnStackSize.clear();
}
/// \brief Generate a stackmap record for a stackmap instruction.
///
/// MI must be a raw STACKMAP, not a PATCHPOINT.
void recordStackMap(const MachineInstr &MI);
/// \brief Generate a stackmap record for a patchpoint instruction.
void recordPatchPoint(const MachineInstr &MI);
/// \brief Generate a stackmap record for a statepoint instruction.
void recordStatepoint(const MachineInstr &MI);
/// If there is any stack map data, create a stack map section and serialize
/// the map info into it. This clears the stack map data structures
/// afterwards.
void serializeToStackMapSection();
private:
static const char *WSMP;
typedef SmallVector<Location, 8> LocationVec;
typedef SmallVector<LiveOutReg, 8> LiveOutVec;
typedef MapVector<uint64_t, uint64_t> ConstantPool;
typedef MapVector<const MCSymbol *, uint64_t> FnStackSizeMap;
struct CallsiteInfo {
const MCExpr *CSOffsetExpr;
uint64_t ID;
LocationVec Locations;
LiveOutVec LiveOuts;
CallsiteInfo() : CSOffsetExpr(nullptr), ID(0) {}
CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
LocationVec &&Locations, LiveOutVec &&LiveOuts)
: CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)),
LiveOuts(std::move(LiveOuts)) {}
};
typedef std::vector<CallsiteInfo> CallsiteInfoList;
AsmPrinter &AP;
CallsiteInfoList CSInfos;
ConstantPool ConstPool;
FnStackSizeMap FnStackSize;
MachineInstr::const_mop_iterator
parseOperand(MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
LiveOutVec &LiveOuts) const;
/// \brief Create a live-out register record for the given register @p Reg.
LiveOutReg createLiveOutReg(unsigned Reg,
const TargetRegisterInfo *TRI) const;
/// \brief Parse the register live-out mask and return a vector of live-out
/// registers that need to be recorded in the stackmap.
LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;
/// This should be called by the MC lowering code _immediately_ before
/// lowering the MI to an MCInst. It records where the operands for the
/// instruction are stored, and outputs a label to record the offset of
/// the call from the start of the text section. In special cases (e.g. AnyReg
/// calling convention) the return register is also recorded if requested.
void recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE,
bool recordResult = false);
/// \brief Emit the stackmap header.
void emitStackmapHeader(MCStreamer &OS);
/// \brief Emit the function frame record for each function.
void emitFunctionFrameRecords(MCStreamer &OS);
/// \brief Emit the constant pool.
void emitConstantPoolEntries(MCStreamer &OS);
/// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
void emitCallsiteEntries(MCStreamer &OS);
void print(raw_ostream &OS);
void debug() { print(dbgs()); }
};
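// Example (illustrative sketch of the intended driver flow; 'AP' is the
// AsmPrinter and the instruction walk 'Instrs' is hypothetical):
//   StackMaps SM(AP);
//   for (const MachineInstr &MI : Instrs) {
//     if (MI.getOpcode() == TargetOpcode::STACKMAP)
//       SM.recordStackMap(MI);
//     else if (MI.getOpcode() == TargetOpcode::PATCHPOINT)
//       SM.recordPatchPoint(MI);
//   }
//   SM.serializeToStackMapSection(); // emits the section, then clears state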
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/PseudoSourceValue.h | //===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the PseudoSourceValue class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
#include "llvm/IR/Value.h"
namespace llvm {
class MachineFrameInfo;
class MachineMemOperand;
class raw_ostream;
raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MMO);
/// PseudoSourceValue - Special value supplied for machine level alias
/// analysis. It indicates that a memory access references the function's
/// stack frame (e.g., a spill slot), below the stack frame (e.g., argument
/// space), or constant pool.
class PseudoSourceValue {
private:
friend class MachineMemOperand; // For printCustom().
/// printCustom - Implement printing for PseudoSourceValue. This is called
/// from Value::print or Value's operator<<.
///
virtual void printCustom(raw_ostream &O) const;
public:
/// isFixed - Whether this is a FixedStackPseudoSourceValue.
bool isFixed;
explicit PseudoSourceValue(bool isFixed = false);
virtual ~PseudoSourceValue();
/// isConstant - Test whether the memory pointed to by this
/// PseudoSourceValue has a constant value.
///
virtual bool isConstant(const MachineFrameInfo *) const;
/// isAliased - Test whether the memory pointed to by this
/// PseudoSourceValue may also be pointed to by an LLVM IR Value.
virtual bool isAliased(const MachineFrameInfo *) const;
/// mayAlias - Return true if the memory pointed to by this
/// PseudoSourceValue can ever alias an LLVM IR Value.
virtual bool mayAlias(const MachineFrameInfo *) const;
/// A pseudo source value referencing a fixed stack frame entry,
/// e.g., a spill slot.
static const PseudoSourceValue *getFixedStack(int FI);
/// A pseudo source value referencing the area below the stack frame of
/// a function, e.g., the argument space.
static const PseudoSourceValue *getStack();
/// A pseudo source value referencing the global offset table
/// (or something like it).
static const PseudoSourceValue *getGOT();
/// A pseudo source value referencing the constant pool. Since constant
/// pools are constant, this doesn't need to identify a specific constant
/// pool entry.
static const PseudoSourceValue *getConstantPool();
/// A pseudo source value referencing a jump table. Since jump tables are
/// constant, this doesn't need to identify a specific jump table.
static const PseudoSourceValue *getJumpTable();
};
/// FixedStackPseudoSourceValue - A specialized PseudoSourceValue
/// for holding FixedStack values, which must include a frame
/// index.
class FixedStackPseudoSourceValue : public PseudoSourceValue {
const int FI;
public:
explicit FixedStackPseudoSourceValue(int fi) :
PseudoSourceValue(true), FI(fi) {}
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
static inline bool classof(const PseudoSourceValue *V) {
return V->isFixed;
}
bool isConstant(const MachineFrameInfo *MFI) const override;
bool isAliased(const MachineFrameInfo *MFI) const override;
bool mayAlias(const MachineFrameInfo *) const override;
void printCustom(raw_ostream &OS) const override;
int getFrameIndex() const { return FI; }
};
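// Example (illustrative): recovering the frame index behind a pseudo source
// value via the classof-based dyn_cast machinery.
//   const PseudoSourceValue *PSV = PseudoSourceValue::getFixedStack(FI);
//   if (const auto *FS = dyn_cast<FixedStackPseudoSourceValue>(PSV))
//     assert(FS->getFrameIndex() == FI);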
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/SchedulerRegistry.h | //===-- llvm/CodeGen/SchedulerRegistry.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for instruction scheduler function
// pass registry (RegisterScheduler).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
//===----------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===----------------------------------------------------------------------===//
class SelectionDAGISel;
class ScheduleDAGSDNodes;
class SelectionDAG;
class MachineBasicBlock;
class RegisterScheduler : public MachinePassRegistryNode {
public:
typedef ScheduleDAGSDNodes *(*FunctionPassCtor)(SelectionDAGISel*,
CodeGenOpt::Level);
static MachinePassRegistry Registry;
RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
: MachinePassRegistryNode(N, D, (MachinePassCtor)C)
{ Registry.Add(this); }
~RegisterScheduler() { Registry.Remove(this); }
// Accessors.
//
RegisterScheduler *getNext() const {
return (RegisterScheduler *)MachinePassRegistryNode::getNext();
}
static RegisterScheduler *getList() {
return (RegisterScheduler *)Registry.getList();
}
static FunctionPassCtor getDefault() {
return (FunctionPassCtor)Registry.getDefault();
}
static void setDefault(FunctionPassCtor C) {
Registry.setDefault((MachinePassCtor)C);
}
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}
};
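// Example (illustrative): how a scheduler implementation registers itself
// with this registry, using one of the factory functions declared below.
//   static RegisterScheduler
//     burrListDAGScheduler("list-burr",
//                          "Bottom-up register reduction list scheduling",
//                          createBURRListDAGScheduler);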
/// createBURRListDAGScheduler - This creates a bottom up register usage
/// reduction list scheduler.
ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
/// schedules nodes in source code order when possible.
ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
/// createHybridListDAGScheduler - This creates a bottom up register pressure
/// aware list scheduler that makes use of latency information to avoid stalls
/// for long latency instructions in low register pressure mode. In high
/// register pressure mode it schedules to reduce register pressure.
ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level);
/// createILPListDAGScheduler - This creates a bottom up register pressure
/// aware list scheduler that tries to increase instruction level parallelism
/// in low register pressure mode. In high register pressure mode it schedules
/// to reduce register pressure.
ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level);
/// createFastDAGScheduler - This creates a "fast" scheduler.
///
ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates a
/// top-down, DFA-driven list scheduler with a clustering heuristic to control
/// register pressure.
ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
/// linearizes the DAG using topological order.
ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineFunctionAnalysis.h | //===-- MachineFunctionAnalysis.h - Owner of MachineFunctions ----*-C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachineFunctionAnalysis class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
#define LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
#include "llvm/Pass.h"
namespace llvm {
class MachineFunction;
class MachineFunctionInitializer;
class TargetMachine;
/// MachineFunctionAnalysis - This class is a Pass that manages a
/// MachineFunction object.
struct MachineFunctionAnalysis : public FunctionPass {
private:
const TargetMachine &TM;
MachineFunction *MF;
unsigned NextFnNum;
MachineFunctionInitializer *MFInitializer;
public:
static char ID;
explicit MachineFunctionAnalysis(const TargetMachine &tm,
MachineFunctionInitializer *MFInitializer);
~MachineFunctionAnalysis() override;
MachineFunction &getMF() const { return *MF; }
const char* getPassName() const override {
return "Machine Function Analysis";
}
private:
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/SelectionDAG.h | //===-- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SelectionDAG class, and transitively defines the
// SDNode class and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SELECTIONDAG_H
#define LLVM_CODEGEN_SELECTIONDAG_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <map>
#include <string>
#include <vector>
namespace llvm {
class AliasAnalysis;
class MachineConstantPoolValue;
class MachineFunction;
class MDNode;
class SDDbgValue;
class TargetLowering;
class TargetSelectionDAGInfo;
class SDVTListNode : public FoldingSetNode {
friend struct FoldingSetTrait<SDVTListNode>;
/// A reference to an Interned FoldingSetNodeID for this node.
/// The Allocator in SelectionDAG holds the data.
/// SDVTList contains all types which are frequently accessed in SelectionDAG.
/// The size of this list is not expected to be big so it won't introduce
/// a memory penalty.
FoldingSetNodeIDRef FastID;
const EVT *VTs;
unsigned int NumVTs;
/// The hash value for SDVTList is fixed, so cache it to avoid
/// hash calculation.
unsigned HashValue;
public:
SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
FastID(ID), VTs(VT), NumVTs(Num) {
HashValue = ID.ComputeHash();
}
SDVTList getSDVTList() {
SDVTList result = {VTs, NumVTs};
return result;
}
};
/// Specialize FoldingSetTrait for SDVTListNode
/// to avoid computing temp FoldingSetNodeID and hash value.
template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
ID = X.FastID;
}
static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) {
if (X.HashValue != IDHash)
return false;
return ID == X.FastID;
}
static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
return X.HashValue;
}
};
template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> {
private:
mutable ilist_half_node<SDNode> Sentinel;
public:
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
SDNode *
createSentinel() const {
return static_cast<SDNode*>(&Sentinel);
}
static void destroySentinel(SDNode *) {}
SDNode *provideInitialHead() const { return createSentinel(); }
SDNode *ensureHead(SDNode*) const { return createSentinel(); }
static void noteHead(SDNode*, SDNode*) {}
static void deleteNode(SDNode *) {
llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
}
private:
static void createNode(const SDNode &);
};
/// Keeps track of dbg_value information through SDISel. We do
/// not build SDNodes for these so as not to perturb the generated code;
/// instead the info is kept off to the side in this structure. Each SDNode may
/// have one or more associated dbg_value entries. This information is kept in
/// DbgValMap.
/// Byval parameters are handled separately because they don't use allocas,
/// which breaks the normal mechanism. There is good reason for handling all
/// parameters separately: they may not have code generated for them, they
/// should always go at the beginning of the function regardless of other code
/// motion, and debug info for them is potentially useful even if the parameter
/// is unused. Right now only byval parameters are handled separately.
class SDDbgInfo {
BumpPtrAllocator Alloc;
SmallVector<SDDbgValue*, 32> DbgValues;
SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
typedef DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMapType;
DbgValMapType DbgValMap;
void operator=(const SDDbgInfo&) = delete;
SDDbgInfo(const SDDbgInfo&) = delete;
public:
SDDbgInfo() {}
void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
if (isParameter) {
ByvalParmDbgValues.push_back(V);
} else DbgValues.push_back(V);
if (Node)
DbgValMap[Node].push_back(V);
}
/// \brief Invalidate all DbgValues attached to the node and remove
/// it from the Node-to-DbgValues map.
void erase(const SDNode *Node);
void clear() {
DbgValMap.clear();
DbgValues.clear();
ByvalParmDbgValues.clear();
Alloc.Reset();
}
BumpPtrAllocator &getAlloc() { return Alloc; }
bool empty() const {
return DbgValues.empty() && ByvalParmDbgValues.empty();
}
ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
DbgValMapType::iterator I = DbgValMap.find(Node);
if (I != DbgValMap.end())
return I->second;
return ArrayRef<SDDbgValue*>();
}
typedef SmallVectorImpl<SDDbgValue*>::iterator DbgIterator;
DbgIterator DbgBegin() { return DbgValues.begin(); }
DbgIterator DbgEnd() { return DbgValues.end(); }
DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
};
class SelectionDAG;
void checkForCycles(const SelectionDAG *DAG, bool force = false);
/// This is used to represent a portion of an LLVM function in a low-level
/// Data Dependence DAG representation suitable for instruction selection.
/// This DAG is constructed as the first step of instruction selection in order
/// to allow implementation of machine specific optimizations
/// and code simplifications.
///
/// The representation used by the SelectionDAG is a target-independent
/// representation, which has some similarities to the GCC RTL representation,
/// but is significantly simpler and more powerful, and is in graph form
/// rather than linear form.
///
class SelectionDAG {
const TargetMachine &TM;
const TargetSelectionDAGInfo *TSI;
const TargetLowering *TLI;
MachineFunction *MF;
LLVMContext *Context;
CodeGenOpt::Level OptLevel;
/// The starting token.
SDNode EntryNode;
/// The root of the entire DAG.
SDValue Root;
/// A linked list of nodes in the current DAG.
ilist<SDNode> AllNodes;
/// The AllocatorType for allocating SDNodes. We use
/// pool allocation with recycling.
typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
AlignOf<MostAlignedSDNode>::Alignment>
NodeAllocatorType;
/// Pool allocation for nodes.
NodeAllocatorType NodeAllocator;
/// This structure is used to memoize nodes, automatically performing
/// CSE with existing nodes when a duplicate is requested.
FoldingSet<SDNode> CSEMap;
/// Pool allocation for machine-opcode SDNode operands.
BumpPtrAllocator OperandAllocator;
/// Pool allocation for misc. objects that are created once per SelectionDAG.
BumpPtrAllocator Allocator;
/// Tracks dbg_value information through SDISel.
SDDbgInfo *DbgInfo;
public:
/// Clients of various APIs that cause global effects on
/// the DAG can optionally implement this interface. This allows the clients
/// to handle the various sorts of updates that happen.
///
/// A DAGUpdateListener automatically registers itself with DAG when it is
/// constructed, and removes itself when destroyed in RAII fashion.
struct DAGUpdateListener {
DAGUpdateListener *const Next;
SelectionDAG &DAG;
explicit DAGUpdateListener(SelectionDAG &D)
: Next(D.UpdateListeners), DAG(D) {
DAG.UpdateListeners = this;
}
virtual ~DAGUpdateListener() {
assert(DAG.UpdateListeners == this &&
"DAGUpdateListeners must be destroyed in LIFO order");
DAG.UpdateListeners = Next;
}
/// The node N that was deleted and, if E is not null, an
/// equivalent node E that replaced it.
virtual void NodeDeleted(SDNode *N, SDNode *E);
/// The node N that was updated.
virtual void NodeUpdated(SDNode *N);
};
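// Example (illustrative): a minimal listener that counts node deletions.
// Registration and removal are handled by the RAII constructor/destructor.
//   struct CountingListener : SelectionDAG::DAGUpdateListener {
//     unsigned Deleted = 0;
//     explicit CountingListener(SelectionDAG &D) : DAGUpdateListener(D) {}
//     void NodeDeleted(SDNode *N, SDNode *E) override { ++Deleted; }
//   };
//   CountingListener Listener(DAG); // active until end of scope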
/// When true, additional steps are taken to
/// ensure that getConstant() and similar functions return DAG nodes that
/// have legal types. This is important after type legalization since
/// any illegally typed nodes generated after this point will not experience
/// type legalization.
bool NewNodesMustHaveLegalTypes;
private:
/// DAGUpdateListener is a friend so it can manipulate the listener stack.
friend struct DAGUpdateListener;
/// Linked list of registered DAGUpdateListener instances.
/// This stack is maintained by DAGUpdateListener RAII.
DAGUpdateListener *UpdateListeners;
/// Implementation of setSubgraphColor.
/// Return whether we had to truncate the search.
bool setSubgraphColorHelper(SDNode *N, const char *Color,
DenseSet<SDNode *> &visited,
int level, bool &printed);
void operator=(const SelectionDAG&) = delete;
SelectionDAG(const SelectionDAG&) = delete;
public:
explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level);
~SelectionDAG();
/// Prepare this SelectionDAG to process code in the given MachineFunction.
void init(MachineFunction &mf);
/// Clear state and free memory necessary to make this
/// SelectionDAG ready to process a new block.
void clear();
MachineFunction &getMachineFunction() const { return *MF; }
const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
const TargetMachine &getTarget() const { return TM; }
const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
const TargetSelectionDAGInfo &getSelectionDAGInfo() const { return *TSI; }
LLVMContext *getContext() const { return Context; }
/// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
void viewGraph(const std::string &Title);
void viewGraph();
#ifndef NDEBUG
std::map<const SDNode *, std::string> NodeGraphAttrs;
#endif
/// Clear all previously defined node graph attributes.
/// Intended to be used from a debugging tool (eg. gdb).
void clearGraphAttrs();
/// Set graph attributes for a node. (eg. "color=red".)
void setGraphAttrs(const SDNode *N, const char *Attrs);
/// Get graph attributes for a node. (eg. "color=red".)
/// Used from getNodeAttributes.
const std::string getGraphAttrs(const SDNode *N) const;
/// Convenience for setting node color attribute.
void setGraphColor(const SDNode *N, const char *Color);
/// Convenience for setting subgraph color attribute.
void setSubgraphColor(SDNode *N, const char *Color);
typedef ilist<SDNode>::const_iterator allnodes_const_iterator;
allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
typedef ilist<SDNode>::iterator allnodes_iterator;
allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
allnodes_iterator allnodes_end() { return AllNodes.end(); }
ilist<SDNode>::size_type allnodes_size() const {
return AllNodes.size();
}
iterator_range<allnodes_iterator> allnodes() {
return iterator_range<allnodes_iterator>(allnodes_begin(), allnodes_end());
}
iterator_range<allnodes_const_iterator> allnodes() const {
return iterator_range<allnodes_const_iterator>(allnodes_begin(),
allnodes_end());
}
/// Return the root tag of the SelectionDAG.
const SDValue &getRoot() const { return Root; }
/// Return the token chain corresponding to the entry of the function.
SDValue getEntryNode() const {
return SDValue(const_cast<SDNode *>(&EntryNode), 0);
}
/// Set the current root tag of the SelectionDAG.
///
const SDValue &setRoot(SDValue N) {
assert((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!");
if (N.getNode())
checkForCycles(N.getNode(), this);
Root = N;
if (N.getNode())
checkForCycles(this);
return Root;
}
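// Example (illustrative; assumes a SelectionDAG 'DAG' and an SDLoc 'DL'):
//   SDValue C1 = DAG.getConstant(1, DL, MVT::i32);
//   SDValue C2 = DAG.getConstant(2, DL, MVT::i32);
//   SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i32, C1, C2);
//   // Note: setRoot requires a chain value (MVT::Other), e.g. a store or
//   // TokenFactor; plain arithmetic like 'Sum' is not a valid root.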
/// This iterates over the nodes in the SelectionDAG, folding
/// certain types of nodes together, or eliminating superfluous nodes. The
/// Level argument controls whether Combine is allowed to produce nodes and
/// types that are illegal on the target.
void Combine(CombineLevel Level, AliasAnalysis &AA,
CodeGenOpt::Level OptLevel);
/// This transforms the SelectionDAG into a SelectionDAG that
/// only uses types natively supported by the target.
/// Returns "true" if it made any changes.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
bool LegalizeTypes();
/// This transforms the SelectionDAG into a SelectionDAG that is
/// compatible with the target instruction selector, as indicated by the
/// TargetLowering object.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
void Legalize();
/// \brief Transforms a SelectionDAG node and any operands to it into a node
/// that is compatible with the target instruction selector, as indicated by
/// the TargetLowering object.
///
/// \returns true if \c N is a valid, legal node after calling this.
///
/// This essentially runs a single recursive walk of the \c Legalize process
/// over the given node (and its operands). This can be used to incrementally
/// legalize the DAG. All of the nodes which are directly replaced,
/// potentially including N, are added to the output parameter \c
/// UpdatedNodes so that the delta to the DAG can be understood by the
/// caller.
///
/// When this returns false, N has been legalized in a way that make the
/// pointer passed in no longer valid. It may have even been deleted from the
/// DAG, and so it shouldn't be used further. When this returns true, the
/// N passed in is a legal node, and can be immediately processed as such.
/// This may still have done some work on the DAG, and will still populate
/// UpdatedNodes with any new nodes replacing those originally in the DAG.
bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
/// This transforms the SelectionDAG into a SelectionDAG
/// that only uses vector math operations supported by the target. This is
/// necessary as a separate step from Legalize because unrolling a vector
/// operation can introduce illegal types, which requires running
/// LegalizeTypes again.
///
/// This returns true if it made any changes; in that case, LegalizeTypes
/// is called again before Legalize.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
bool LegalizeVectors();
/// This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNodes();
/// Remove the specified node from the system. This node must
/// have no referrers.
void DeleteNode(SDNode *N);
/// Return an SDVTList that represents the list of values specified.
SDVTList getVTList(EVT VT);
SDVTList getVTList(EVT VT1, EVT VT2);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
SDVTList getVTList(ArrayRef<EVT> VTs);
//===--------------------------------------------------------------------===//
// Node creation methods.
//
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget = false,
bool isOpaque = false);
SDValue getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isTarget = false,
bool isOpaque = false);
SDValue getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget = false);
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const APInt &Val, SDLoc DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
// The forms below that take a double should only be used for simple
// constants that can be exactly represented in VT. No checks are made.
SDValue getConstantFP(double Val, SDLoc DL, EVT VT, bool isTarget = false);
SDValue getConstantFP(const APFloat& Val, SDLoc DL, EVT VT,
bool isTarget = false);
SDValue getConstantFP(const ConstantFP &CF, SDLoc DL, EVT VT,
bool isTarget = false);
SDValue getTargetConstantFP(double Val, SDLoc DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const APFloat& Val, SDLoc DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const ConstantFP &Val, SDLoc DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned char TargetFlags = 0);
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
int64_t offset = 0,
unsigned char TargetFlags = 0) {
return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
}
SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
SDValue getTargetFrameIndex(int FI, EVT VT) {
return getFrameIndex(FI, VT, true);
}
SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
unsigned char TargetFlags = 0);
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
return getJumpTable(JTI, VT, true, TargetFlags);
}
SDValue getConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offs = 0, bool isT=false,
unsigned char TargetFlags = 0);
SDValue getTargetConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offset = 0,
unsigned char TargetFlags = 0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
unsigned Align = 0, int Offs = 0, bool isT=false,
unsigned char TargetFlags = 0);
SDValue getTargetConstantPool(MachineConstantPoolValue *C,
EVT VT, unsigned Align = 0,
int Offset = 0, unsigned char TargetFlags=0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
unsigned char TargetFlags = 0);
// When generating a branch to a BB, we don't in general know enough
// to provide debug info for the BB at that time, so keep this one around.
SDValue getBasicBlock(MachineBasicBlock *MBB);
SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
SDValue getExternalSymbol(const char *Sym, EVT VT);
SDValue getExternalSymbol(const char *Sym, SDLoc dl, EVT VT);
SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
unsigned char TargetFlags = 0);
SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
SDValue getRegisterMask(const uint32_t *RegMask);
SDValue getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label);
SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
int64_t Offset = 0, bool isTarget = false,
unsigned char TargetFlags = 0);
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
int64_t Offset = 0,
unsigned char TargetFlags = 0) {
return getBlockAddress(BA, VT, Offset, true, TargetFlags);
}
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N) {
return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
getRegister(Reg, N.getValueType()), N);
}
// This version of the getCopyToReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
return getNode(ISD::CopyToReg, dl, VTs,
ArrayRef<SDValue>(Ops, Glue.getNode() ? 4 : 3));
}
// Similar to last getCopyToReg() except parameter Reg is a SDValue
SDValue getCopyToReg(SDValue Chain, SDLoc dl, SDValue Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Reg, N, Glue };
return getNode(ISD::CopyToReg, dl, VTs,
ArrayRef<SDValue>(Ops, Glue.getNode() ? 4 : 3));
}
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT) {
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, getRegister(Reg, VT) };
return getNode(ISD::CopyFromReg, dl, VTs, Ops);
}
// This version of the getCopyFromReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT,
SDValue Glue) {
SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
return getNode(ISD::CopyFromReg, dl, VTs,
ArrayRef<SDValue>(Ops, Glue.getNode() ? 3 : 2));
}
SDValue getCondCode(ISD::CondCode Cond);
/// Returns the ConvertRndSat node. Note: avoid using this node because it may
/// disappear in the future and most targets don't support it.
SDValue getConvertRndSat(EVT VT, SDLoc dl, SDValue Val, SDValue DTy,
SDValue STy,
SDValue Rnd, SDValue Sat, ISD::CvtCode Code);
/// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
/// which must be a vector type, must match the number of mask elements
/// NumElts. An integer mask element equal to -1 is treated as undefined.
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
const int *MaskElts);
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
ArrayRef<int> MaskElts) {
assert(VT.getVectorNumElements() == MaskElts.size() &&
"Must have the same number of vector elements as mask elements!");
return getVectorShuffle(VT, dl, N1, N2, MaskElts.data());
}
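// Example (illustrative; assumes 'DAG', an SDLoc 'dl', and two v4i32
// values A and B):
//   int Mask[] = {0, 4, 1, 5}; // interleave the low halves of A and B
//   SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, A, B, Mask);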
/// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
/// the shuffle node in input but with swapped operands.
///
/// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either any-extending or truncating it.
SDValue getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either sign-extending or truncating it.
SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either zero-extending or truncating it.
SDValue getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
/// Return the expression required to zero extend the Op
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, SDLoc DL, EVT SrcTy);
/// Return an operation which will any-extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by any-extending the low four
/// lanes of the operand from i8 to i32.
SDValue getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
/// Return an operation which will sign extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by sign extending the low four
/// lanes of the operand from i8 to i32.
SDValue getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
/// Return an operation which will zero extend the low lanes of the operand
/// into the specified vector type. For example,
/// this can convert a v16i8 into a v4i32 by zero extending the low four
/// lanes of the operand from i8 to i32.
SDValue getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
/// Convert Op, which must be of integer type, to the integer type VT,
/// by using an extension appropriate for the target's
/// BooleanContent for type OpVT or truncating it.
SDValue getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT, EVT OpVT);
/// Create a bitwise NOT operation as (XOR Val, -1).
SDValue getNOT(SDLoc DL, SDValue Val, EVT VT);
/// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getLogicalNOT(SDLoc DL, SDValue Val, EVT VT);
/// Return a new CALLSEQ_START node, which always must have a glue result
/// (to ensure it's not CSE'd). CALLSEQ_START does not have a useful SDLoc.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Op };
return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
}
/// Return a new CALLSEQ_END node, which always must have a
/// glue result (to ensure it's not CSE'd).
/// CALLSEQ_END does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
SDValue InGlue, SDLoc DL) {
SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(Chain);
Ops.push_back(Op1);
Ops.push_back(Op2);
if (InGlue.getNode())
Ops.push_back(InGlue);
return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
}
/// Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getUNDEF(EVT VT) {
return getNode(ISD::UNDEF, SDLoc(), VT);
}
/// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
}
/// Gets or creates the specified node.
///
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
ArrayRef<SDValue> Ops);
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
const SDNodeFlags *Flags = nullptr);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
SDValue N3, SDValue N4, SDValue N5);
// Specialize again based on number of operands for nodes with a VTList
// rather than a single VT.
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue N2);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
/// Compute a TokenFactor to force all the incoming stack arguments to be
/// loaded from the stack. This is used in tail call lowering to protect
/// stack arguments from being clobbered.
SDValue getStackArgumentTokenFactor(SDValue Chain);
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getMemset(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo);
/// Helper function to make it easier to build SetCC's if you just
/// have an ISD::CondCode instead of an SDValue.
///
SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Cannot compare scalars to vectors");
assert(LHS.getValueType().isVector() == VT.isVector() &&
"Cannot compare scalars to vectors");
assert(Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.");
return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
}
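// Example (illustrative; assumes 'DAG', an SDLoc 'DL', and two i32 values
// A and B; uses getSetCC together with getSelect, declared just below):
//   SDValue Cmp = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETLT); // A < B
//   SDValue Min = DAG.getSelect(DL, MVT::i32, Cmp, A, B);      // min(A, B)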
/// Helper function to make it easier to build Select's if you just
/// have operands and don't want to check for vector.
SDValue getSelect(SDLoc DL, EVT VT, SDValue Cond,
SDValue LHS, SDValue RHS) {
assert(LHS.getValueType() == RHS.getValueType() &&
"Cannot use select on differing types");
assert(VT.isVector() == LHS.getValueType().isVector() &&
"Cannot mix vectors and scalars");
return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
Cond, LHS, RHS);
}
/// Helper function to make it easier to build SelectCC's if you
/// just have an ISD::CondCode instead of an SDValue.
///
SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS,
SDValue True, SDValue False, ISD::CondCode Cond) {
return getNode(ISD::SELECT_CC, DL, True.getValueType(),
LHS, RHS, True, False, getCondCode(Cond));
}
/// VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue SV, unsigned Align);
/// Gets a node for an atomic cmpxchg op. There are two
/// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
/// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
/// a success flag (initially i1), and a chain.
SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
MachinePointerInfo PtrInfo, unsigned Alignment,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value *PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
/// Gets a node for an atomic op, produces result and chain and takes N
/// operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
/// Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true,
unsigned Size = 0);
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
ArrayRef<SDValue> Ops,
EVT MemVT, MachineMemOperand *MMO);
/// Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl);
/// Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
///
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
EVT MemVT, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, EVT MemVT,
MachineMemOperand *MMO);
SDValue getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal, bool isInvariant,
unsigned Alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// Helper function to build ISD::STORE nodes.
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT TVT,
bool isNonTemporal, bool isVolatile,
unsigned Alignment,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
SDValue Mask, SDValue Src0, EVT MemVT,
MachineMemOperand *MMO, ISD::LoadExtType);
SDValue getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Ptr, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, bool IsTrunc);
SDValue getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
SDValue getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
/// Return an MDNodeSDNode which holds an MDNode.
SDValue getMDNode(const MDNode *MD);
/// Return a bitcast using the SDLoc of the value operand, and casting to the
/// provided type. Use getNode to set a custom SDLoc.
SDValue getBitcast(EVT VT, SDValue V);
/// Return an AddrSpaceCastSDNode.
SDValue getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
unsigned SrcAS, unsigned DestAS);
/// Return the specified value casted to
/// the target's desired shift amount type.
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
/// *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
/// These are used for target selectors to *mutate* the
/// specified node to have the specified return type, Target opcode, and
/// operands. Note that target opcodes are stored as
/// ~TargetOpcode in the node opcode field. The resultant node is returned.
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already
/// a node of the specified opcode and operands, it returns that node instead
/// of the current one.
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, SDValue Op1, SDValue Op2,
SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl,
ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue Operand);
/// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
SDValue Operand, SDValue Subreg);
/// Get the specified node if it's already available, or else return NULL.
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
const SDNodeFlags *Flags = nullptr);
/// Creates an SDDbgValue node.
SDDbgValue *getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R,
bool IsIndirect, uint64_t Off, DebugLoc DL,
unsigned O);
/// Creates a constant SDDbgValue node.
SDDbgValue *getConstantDbgValue(MDNode *Var, MDNode *Expr, const Value *C,
uint64_t Off, DebugLoc DL, unsigned O);
/// Creates a FrameIndex SDDbgValue node.
SDDbgValue *getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, unsigned FI,
uint64_t Off, DebugLoc DL, unsigned O);
/// Remove the specified node from the system. If any of its
/// operands then become dead, remove them as well. Inform UpdateListener
/// for each node deleted.
void RemoveDeadNode(SDNode *N);
/// This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
/// Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG. Use the first
/// version if 'From' is known to have a single result, use the second
/// if you have two nodes with identical results (or if 'To' has a superset
/// of the results of 'From'), use the third otherwise.
///
/// These methods all take an optional UpdateListener, which (if not null) is
/// informed about nodes that are deleted and modified due to recursive
/// changes in the dag.
///
/// These functions only replace all existing uses. It's possible that as
/// these replacements are being performed, CSE may cause the From node
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
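/// A minimal sketch (illustrative only; \c DAG and the From/To values are
/// assumed to exist):
/// \code
///   // Redirect every use of the single-result value From to To.
///   DAG.ReplaceAllUsesWith(FromValue, ToValue);
///   // Redirect every use of every result of FromNode to the
///   // corresponding result of ToNode.
///   DAG.ReplaceAllUsesWith(FromNode, ToNode);
/// \endcode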
void ReplaceAllUsesWith(SDValue From, SDValue Op);
void ReplaceAllUsesWith(SDNode *From, SDNode *To);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
/// Replace any uses of From with To, leaving
/// uses of other values produced by From.Val alone.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
/// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
/// This correctly handles the case where
/// there is an overlap between the From values and the To values.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
unsigned Num);
/// Topologically sort the AllNodes list and
/// assign a unique node id for each node in the DAG based on its
/// topological order. Returns the number of nodes.
unsigned AssignTopologicalOrder();
/// Move node N in the AllNodes list to be immediately
/// before the given iterator Position. This may be used to update the
/// topological ordering when the list of nodes is modified.
void RepositionNode(allnodes_iterator Position, SDNode *N) {
AllNodes.insert(Position, AllNodes.remove(N));
}
/// Returns true if the opcode is a commutative binary operation.
static bool isCommutativeBinOp(unsigned Opcode) {
// FIXME: This should get its info from the td file, so that we can include
// target info.
switch (Opcode) {
case ISD::ADD:
case ISD::MUL:
case ISD::MULHU:
case ISD::MULHS:
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
case ISD::FADD:
case ISD::FMUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::SADDO:
case ISD::UADDO:
case ISD::ADDC:
case ISD::ADDE:
case ISD::FMINNUM:
case ISD::FMAXNUM:
return true;
default: return false;
}
}
/// Returns an APFloat semantics tag appropriate for the given type. If VT is
/// a vector type, the element semantics are returned.
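///
/// For example (illustrative), a zero of the right format for a given VT
/// can be built with:
/// \code
///   APFloat Z = APFloat::getZero(SelectionDAG::EVTToAPFloatSemantics(VT));
/// \endcode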
static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
switch (VT.getScalarType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP format");
case MVT::f16: return APFloat::IEEEhalf;
case MVT::f32: return APFloat::IEEEsingle;
case MVT::f64: return APFloat::IEEEdouble;
case MVT::f80: return APFloat::x87DoubleExtended;
case MVT::f128: return APFloat::IEEEquad;
case MVT::ppcf128: return APFloat::PPCDoubleDouble;
}
}
/// Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
/// Get the debug values which reference the given SDNode.
ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
return DbgInfo->getSDDbgValues(SD);
}
/// Transfer SDDbgValues.
void TransferDbgValues(SDValue From, SDValue To);
/// Return true if there are any SDDbgValue nodes associated
/// with this SelectionDAG.
bool hasDebugValues() const { return !DbgInfo->empty(); }
SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
SDDbgInfo::DbgIterator DbgEnd() { return DbgInfo->DbgEnd(); }
SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
return DbgInfo->ByvalParmDbgBegin();
}
SDDbgInfo::DbgIterator ByvalParmDbgEnd() {
return DbgInfo->ByvalParmDbgEnd();
}
void dump() const;
/// Create a stack temporary, suitable for holding the
/// specified value type. If minAlign is specified, the slot size will have
/// at least that alignment.
SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
/// Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue CreateStackTemporary(EVT VT1, EVT VT2);
SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
SDNode *Cst1, SDNode *Cst2);
SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
const ConstantSDNode *Cst1,
const ConstantSDNode *Cst2);
/// Constant fold a setcc to true or false.
SDValue FoldSetCC(EVT VT, SDValue N1,
SDValue N2, ISD::CondCode Cond, SDLoc dl);
/// Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
/// Return true if 'Op & Mask' is known to be zero. We
/// use this predicate to simplify operations downstream. Op and Mask are
/// known to be the same type.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
const;
/// Determine which bits of Op are known to be either zero or one and return
/// them in the KnownZero/KnownOne bitsets. Targets can implement the
/// computeKnownBitsForTargetNode method in the TargetLowering class to allow
/// target nodes to be understood.
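///
/// For example, after an (and X, 0xFF) every bit above bit 7 is known to
/// be zero.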
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
unsigned Depth = 0) const;
/// Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3. Targets can
/// implement the ComputeNumSignBitsForTarget method in the TargetLowering
/// class to allow target nodes to be understood.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
/// Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD. This handles the equivalence:
/// X|Cst == X+Cst iff X&Cst == 0.
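/// For example, if the low two bits of X are known to be zero, then
/// (or X, 3) can be treated as (add X, 3) for addressing purposes.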
bool isBaseWithConstantOffset(SDValue Op) const;
/// Test whether the given SDValue is known to never be NaN.
bool isKnownNeverNaN(SDValue Op) const;
/// Test whether the given SDValue is known to never be
/// positive or negative Zero.
bool isKnownNeverZero(SDValue Op) const;
/// Test whether two SDValues are known to compare equal. This
/// is true if they are the same value, or if one is negative zero and the
/// other positive zero.
bool isEqualTo(SDValue A, SDValue B) const;
/// Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector
/// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
/// If the ResNE is greater than the width of the vector op, unroll the
/// vector op and fill the end of the resulting vector with UNDEFS.
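///
/// For example (illustrative), unrolling a <4 x i32> ADD with ResNE == 0
/// yields four scalar ADDs whose results are recombined with a
/// BUILD_VECTOR.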
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
/// Return true if LD is loading 'Bytes' bytes from a location that is 'Dist'
/// units away from the location that the 'Base' load is loading from.
bool isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
unsigned Bytes, int Dist) const;
/// Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned InferPtrAlignment(SDValue Ptr) const;
/// Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
/// Split the vector with EXTRACT_SUBVECTOR using the provided
/// VTs and return the low/high part.
std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
const EVT &LoVT, const EVT &HiVT);
/// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
return SplitVector(N, DL, LoVT, HiVT);
}
/// Split the node's operand with EXTRACT_SUBVECTOR and
/// return the low/high part.
std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
{
return SplitVector(N->getOperand(OpNo), SDLoc(N));
}
/// Append the extracted elements from Start to Count out of the vector Op
/// in Args. If Count is 0, all of the elements will be extracted.
void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
unsigned Start = 0, unsigned Count = 0);
unsigned getEVTAlignment(EVT MemoryVT) const;
private:
void InsertNode(SDNode *N);
bool RemoveNodeFromCSEMaps(SDNode *N);
void AddModifiedNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos);
SDNode *UpdateSDLocOnMergedSDNode(SDNode *N, SDLoc loc);
void DeleteNodeNotInCSEMaps(SDNode *N);
void DeallocateNode(SDNode *N);
void allnodes_clear();
BinarySDNode *GetBinarySDNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
SDValue N1, SDValue N2,
const SDNodeFlags *Flags = nullptr);
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. This
/// overload is for nodes other than Constant or ConstantFP, use the other one
/// for those.
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. Performs
/// additional processing for constant nodes.
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, DebugLoc DL,
void *&InsertPos);
/// List of non-single value types.
FoldingSet<SDVTListNode> VTListMap;
/// Maps to auto-CSE operations.
std::vector<CondCodeSDNode*> CondCodeNodes;
std::vector<SDNode*> ValueTypeNodes;
std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
StringMap<SDNode*> ExternalSymbols;
std::map<std::pair<std::string, unsigned char>,SDNode*> TargetExternalSymbols;
DenseMap<MCSymbol *, SDNode *> MCSymbols;
};
template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
typedef SelectionDAG::allnodes_iterator nodes_iterator;
static nodes_iterator nodes_begin(SelectionDAG *G) {
return G->allnodes_begin();
}
static nodes_iterator nodes_end(SelectionDAG *G) {
return G->allnodes_end();
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineConstantPool.h | //===-- CodeGen/MachineConstantPool.h - Abstract Constant Pool --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file declares the MachineConstantPool class which is an abstract
/// constant pool to keep track of constants referenced by a function.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/MC/SectionKind.h"
#include <cassert>
#include <climits>
#include <vector>
namespace llvm {
class Constant;
class FoldingSetNodeID;
class DataLayout;
class TargetMachine;
class Type;
class MachineConstantPool;
class raw_ostream;
/// Abstract base class for all machine specific constantpool value subclasses.
///
class MachineConstantPoolValue {
virtual void anchor();
Type *Ty;
public:
explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
virtual ~MachineConstantPoolValue() {}
/// getType - get type of this MachineConstantPoolValue.
///
Type *getType() const { return Ty; }
/// getRelocationInfo - This method classifies the entry according to
/// whether or not it may generate a relocation entry. This must be
/// conservative, so if it might codegen to a relocatable entry, it should say
/// so. The return values are the same as Constant::getRelocationInfo().
virtual unsigned getRelocationInfo() const = 0;
virtual int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) = 0;
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
/// print - Implement operator<<
virtual void print(raw_ostream &O) const = 0;
};
inline raw_ostream &operator<<(raw_ostream &OS,
const MachineConstantPoolValue &V) {
V.print(OS);
return OS;
}
/// This class is a data container for one entry in a MachineConstantPool.
/// It contains a pointer to the value and an offset from the start of
/// the constant pool.
/// @brief An entry in a MachineConstantPool
class MachineConstantPoolEntry {
public:
/// The constant itself.
union {
const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
/// The required alignment for this entry. The top bit is set when Val is
/// a target specific MachineConstantPoolValue.
unsigned Alignment;
MachineConstantPoolEntry(const Constant *V, unsigned A)
: Alignment(A) {
Val.ConstVal = V;
}
MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
: Alignment(A) {
Val.MachineCPVal = V;
Alignment |= 1U << (sizeof(unsigned)*CHAR_BIT-1);
}
/// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
/// is indeed a target specific constantpool entry, not a wrapper over a
/// Constant.
bool isMachineConstantPoolEntry() const {
return (int)Alignment < 0;
}
int getAlignment() const {
return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
}
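// Worked example (illustrative): with a 32-bit unsigned, a target-specific
// entry constructed with A == 8 stores Alignment == 0x80000008, so
// isMachineConstantPoolEntry() sees a negative value and getAlignment()
// masks the tag bit off to recover 8.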
Type *getType() const;
/// getRelocationInfo - This method classifies the entry according to
/// whether or not it may generate a relocation entry. This must be
/// conservative, so if it might codegen to a relocatable entry, it should say
/// so. The return values are:
///
/// 0: This constant pool entry is guaranteed to never have a relocation
/// applied to it (because it holds a simple constant like '4').
/// 1: This entry has relocations, but the entries are guaranteed to be
/// resolvable by the static linker, so the dynamic linker will never see
/// them.
/// 2: This entry may have arbitrary relocations.
unsigned getRelocationInfo() const;
SectionKind getSectionKind(const DataLayout *DL) const;
};
/// The MachineConstantPool class keeps track of constants referenced by a
/// function which must be spilled to memory. This is used for constants which
/// cannot be used directly as operands to instructions, which typically
/// include floating point and large integer constants.
///
/// Instructions reference the address of these constant pool constants through
/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine
/// code, these virtual address references are converted to refer to the
/// address of the function constant pool values.
/// @brief The machine constant pool.
class MachineConstantPool {
unsigned PoolAlignment; ///< The alignment for the pool.
std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
/// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
const DataLayout &DL;
const DataLayout &getDataLayout() const { return DL; }
public:
/// @brief The only constructor.
explicit MachineConstantPool(const DataLayout &DL)
: PoolAlignment(1), DL(DL) {}
~MachineConstantPool();
/// getConstantPoolAlignment - Return the alignment required by
/// the whole constant pool, of which the first element must be aligned.
unsigned getConstantPoolAlignment() const { return PoolAlignment; }
/// getConstantPoolIndex - Create a new entry in the constant pool or return
/// an existing one. User must specify the minimum required alignment for
/// the object.
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment);
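/// A minimal usage sketch (illustrative only; \c MCP and the constant \c C
/// are assumed to exist):
/// \code
///   unsigned Idx = MCP->getConstantPoolIndex(C, /*Alignment=*/4);
/// \endcode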
/// isEmpty - Return true if this constant pool contains no constants.
bool isEmpty() const { return Constants.empty(); }
const std::vector<MachineConstantPoolEntry> &getConstants() const {
return Constants;
}
/// print - Used by the MachineFunction printer to print information about
/// constant pool objects. Implemented in MachineFunction.cpp
///
void print(raw_ostream &OS) const;
/// dump - Call print(cerr) to be called from the debugger.
void dump() const;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachORelocation.h | //=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachORelocation class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHORELOCATION_H
#define LLVM_CODEGEN_MACHORELOCATION_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
/// MachORelocation - This struct contains information about each relocation
/// that needs to be emitted to the file.
/// see <mach-o/reloc.h>
class MachORelocation {
uint32_t r_address; // offset in the section to what is being relocated
uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
bool r_pcrel; // was relocated pc-relative already
uint8_t r_length; // length = 2 ^ r_length
bool r_extern; // 1 = r_symbolnum is a symbol index, 0 = a section ordinal
uint8_t r_type; // if not 0, machine-specific relocation type.
bool r_scattered; // 1 = scattered, 0 = non-scattered
int32_t r_value; // the value the item to be relocated is referring
// to.
public:
uint32_t getPackedFields() const {
if (r_scattered)
return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
else
return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
(r_extern << 4) | (r_type & 15);
}
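// Worked example (illustrative): a non-scattered, non-extern, pc-relative
// relocation with r_symbolnum == 5, r_length == 2 and r_type == 0 packs to
// (5 << 8) | (1 << 7) | (2 << 5) == 0x5C0.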
uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
uint32_t getRawAddress() const { return r_address; }
MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
bool ext, uint8_t type, bool scattered = false,
int32_t value = 0) :
r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
};
} // end llvm namespace
#endif // LLVM_CODEGEN_MACHORELOCATION_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RegisterPressure.h | //===-- RegisterPressure.h - Dynamic Register Pressure -*- C++ -*-------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegisterPressure class which can be used to track
// MachineInstr level register pressure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
#define LLVM_CODEGEN_REGISTERPRESSURE_H
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class LiveIntervals;
class LiveRange;
class RegisterClassInfo;
class MachineInstr;
/// Base class for register pressure results.
struct RegisterPressure {
/// Map of max reg pressure indexed by pressure set ID, not class ID.
std::vector<unsigned> MaxSetPressure;
/// List of live in virtual registers or physical register units.
SmallVector<unsigned,8> LiveInRegs;
SmallVector<unsigned,8> LiveOutRegs;
void dump(const TargetRegisterInfo *TRI) const;
};
/// RegisterPressure computed within a region of instructions delimited by
/// TopIdx and BottomIdx. During pressure computation, the maximum pressure per
/// register pressure set is increased. Once pressure within a region is fully
/// computed, the live-in and live-out sets are recorded.
///
/// This is preferable to RegionPressure when LiveIntervals are available,
/// because delimiting regions by SlotIndex is more robust and convenient than
/// holding block iterators. The block contents can change without invalidating
/// the pressure result.
struct IntervalPressure : RegisterPressure {
/// Record the boundary of the region being tracked.
SlotIndex TopIdx;
SlotIndex BottomIdx;
void reset();
void openTop(SlotIndex NextTop);
void openBottom(SlotIndex PrevBottom);
};
/// RegisterPressure computed within a region of instructions delimited by
/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
/// use when LiveIntervals are unavailable.
struct RegionPressure : RegisterPressure {
/// Record the boundary of the region being tracked.
MachineBasicBlock::const_iterator TopPos;
MachineBasicBlock::const_iterator BottomPos;
void reset();
void openTop(MachineBasicBlock::const_iterator PrevTop);
void openBottom(MachineBasicBlock::const_iterator PrevBottom);
};
/// Capture a change in pressure for a single pressure set. UnitInc may be
/// expressed in terms of upward or downward pressure depending on the client
/// and will be dynamically adjusted for current liveness.
///
/// Pressure increments are tiny, typically 1-2 units, and this is only for
/// heuristics, so we don't check UnitInc overflow. Instead, we may have a
/// higher level assert that pressure is consistent within a region. We also
/// effectively ignore dead defs which don't affect heuristics much.
class PressureChange {
uint16_t PSetID; // ID+1. 0=Invalid.
int16_t UnitInc;
public:
PressureChange(): PSetID(0), UnitInc(0) {}
PressureChange(unsigned id): PSetID(id+1), UnitInc(0) {
assert(id < UINT16_MAX && "PSetID overflow.");
}
bool isValid() const { return PSetID > 0; }
unsigned getPSet() const {
assert(isValid() && "invalid PressureChange");
return PSetID - 1;
}
// If PSetID is invalid, return UINT16_MAX to give it lowest priority.
unsigned getPSetOrMax() const { return (PSetID - 1) & UINT16_MAX; }
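// Note: for an invalid PSetID (0), (0 - 1) & UINT16_MAX wraps around to
// UINT16_MAX, which is what gives invalid entries the lowest priority.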
int getUnitInc() const { return UnitInc; }
void setUnitInc(int Inc) { UnitInc = Inc; }
bool operator==(const PressureChange &RHS) const {
return PSetID == RHS.PSetID && UnitInc == RHS.UnitInc;
}
};
template <> struct isPodLike<PressureChange> {
static const bool value = true;
};
/// List of PressureChanges in order of increasing, unique PSetID.
///
/// Use a small fixed number, because we can fit more PressureChanges in an
/// empty SmallVector than ever need to be tracked per register class. If more
/// PSets are affected, then we only track the most constrained.
class PressureDiff {
// The initial design was for MaxPSets=4, but that requires PSet partitions,
// which are not yet implemented. (PSet partitions are equivalent PSets given
// the register classes actually in use within the scheduling region.)
enum { MaxPSets = 16 };
PressureChange PressureChanges[MaxPSets];
public:
typedef PressureChange* iterator;
typedef const PressureChange* const_iterator;
iterator begin() { return &PressureChanges[0]; }
iterator end() { return &PressureChanges[MaxPSets]; }
const_iterator begin() const { return &PressureChanges[0]; }
const_iterator end() const { return &PressureChanges[MaxPSets]; }
void addPressureChange(unsigned RegUnit, bool IsDec,
const MachineRegisterInfo *MRI);
LLVM_DUMP_METHOD void dump(const TargetRegisterInfo &TRI) const;
};
/// Array of PressureDiffs.
class PressureDiffs {
PressureDiff *PDiffArray;
unsigned Size;
unsigned Max;
public:
PressureDiffs(): PDiffArray(nullptr), Size(0), Max(0) {}
~PressureDiffs() { delete[] PDiffArray; } // HLSL Change: Use overridable operator delete
void clear() { Size = 0; }
void init(unsigned N);
PressureDiff &operator[](unsigned Idx) {
assert(Idx < Size && "PressureDiff index out of bounds");
return PDiffArray[Idx];
}
const PressureDiff &operator[](unsigned Idx) const {
return const_cast<PressureDiffs*>(this)->operator[](Idx);
}
};
/// Store the effects of a change in pressure on things that the MI scheduler
/// cares about.
///
/// Excess records the value of the largest difference in register units beyond
/// the target's pressure limits across the affected pressure sets, where
/// largest is defined as the absolute value of the difference. Negative
/// ExcessUnits indicates a reduction in pressure that had already exceeded the
/// target's limits.
///
/// CriticalMax records the largest increase in the tracker's max pressure that
/// exceeds the critical limit for some pressure set determined by the client.
///
/// CurrentMax records the largest increase in the tracker's max pressure that
/// exceeds the current limit for some pressure set determined by the client.
struct RegPressureDelta {
PressureChange Excess;
PressureChange CriticalMax;
PressureChange CurrentMax;
RegPressureDelta() {}
bool operator==(const RegPressureDelta &RHS) const {
return Excess == RHS.Excess && CriticalMax == RHS.CriticalMax
&& CurrentMax == RHS.CurrentMax;
}
bool operator!=(const RegPressureDelta &RHS) const {
return !operator==(RHS);
}
};
/// \brief A set of live virtual registers and physical register units.
///
/// Virtual and physical register numbers require separate sparse sets, but most
/// of the RegisterPressureTracker handles them uniformly.
struct LiveRegSet {
SparseSet<unsigned> PhysRegs;
SparseSet<unsigned, VirtReg2IndexFunctor> VirtRegs;
bool contains(unsigned Reg) const {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return VirtRegs.count(Reg);
return PhysRegs.count(Reg);
}
bool insert(unsigned Reg) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return VirtRegs.insert(Reg).second;
return PhysRegs.insert(Reg).second;
}
bool erase(unsigned Reg) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return VirtRegs.erase(Reg);
return PhysRegs.erase(Reg);
}
};
/// Track the current register pressure at some position in the instruction
/// stream, and remember the high water mark within the region traversed. This
/// does not automatically consider live-through ranges. The client may
/// independently adjust for global liveness.
///
/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
/// be tracked across a larger region by storing a RegisterPressure result at
/// each block boundary and explicitly adjusting pressure to account for block
/// live-in and live-out register sets.
///
/// RegPressureTracker holds a reference to a RegisterPressure result that it
/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
/// is invalid until it reaches the end of the block or closeRegion() is
/// explicitly called. Similarly, P.TopIdx is invalid during upward
/// tracking. Changing direction has the side effect of closing region, and
/// traversing past TopIdx or BottomIdx reopens it.
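/// A minimal bottom-up usage sketch (illustrative only; MF, RCI, LIS and
/// MBB are assumed to be available):
/// \code
///   IntervalPressure Pressure;
///   RegPressureTracker Tracker(Pressure);
///   Tracker.init(MF, RCI, LIS, MBB, MBB->end());
///   while (Tracker.getPos() != MBB->begin())
///     Tracker.recede();
///   Tracker.closeRegion();
/// \endcode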
class RegPressureTracker {
const MachineFunction *MF;
const TargetRegisterInfo *TRI;
const RegisterClassInfo *RCI;
const MachineRegisterInfo *MRI;
const LiveIntervals *LIS;
/// We currently only allow pressure tracking within a block.
const MachineBasicBlock *MBB;
/// Track the max pressure within the region traversed so far.
RegisterPressure &P;
/// Run in two modes depending on whether constructed with IntervalPressure
/// or RegionPressure. If RequireIntervals is false, LIS is ignored.
bool RequireIntervals;
/// True if UntiedDefs will be populated.
bool TrackUntiedDefs;
/// Register pressure corresponds to liveness before this instruction
/// iterator. It may point to the end of the block or a DebugValue rather than
/// an instruction.
MachineBasicBlock::const_iterator CurrPos;
/// Pressure map indexed by pressure set ID, not class ID.
std::vector<unsigned> CurrSetPressure;
/// Set of live registers.
LiveRegSet LiveRegs;
/// Set of vreg defs that start a live range.
SparseSet<unsigned, VirtReg2IndexFunctor> UntiedDefs;
/// Live-through pressure.
std::vector<unsigned> LiveThruPressure;
public:
RegPressureTracker(IntervalPressure &rp) :
MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
RequireIntervals(true), TrackUntiedDefs(false) {}
RegPressureTracker(RegionPressure &rp) :
MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
RequireIntervals(false), TrackUntiedDefs(false) {}
void reset();
void init(const MachineFunction *mf, const RegisterClassInfo *rci,
const LiveIntervals *lis, const MachineBasicBlock *mbb,
MachineBasicBlock::const_iterator pos,
bool ShouldTrackUntiedDefs = false);
/// Force liveness of virtual registers or physical register
/// units. Particularly useful to initialize the livein/out state of the
/// tracker before the first call to advance/recede.
void addLiveRegs(ArrayRef<unsigned> Regs);
/// Get the MI position corresponding to this register pressure.
MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
// Reset the MI position corresponding to the register pressure. This allows
// schedulers to move instructions above the RegPressureTracker's
// CurrPos. Since the pressure is computed before CurrPos, the iterator
// position changes while pressure does not.
void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
/// \brief Get the SlotIndex for the first nondebug instruction including or
/// after the current position.
SlotIndex getCurrSlot() const;
/// Recede across the previous instruction.
bool recede(SmallVectorImpl<unsigned> *LiveUses = nullptr,
PressureDiff *PDiff = nullptr);
/// Advance across the current instruction.
bool advance();
/// Finalize the region boundaries and record live ins and live outs.
void closeRegion();
/// Initialize the LiveThru pressure set based on the untied defs found in
/// RPTracker.
void initLiveThru(const RegPressureTracker &RPTracker);
/// Copy an existing live thru pressure result.
void initLiveThru(ArrayRef<unsigned> PressureSet) {
LiveThruPressure.assign(PressureSet.begin(), PressureSet.end());
}
ArrayRef<unsigned> getLiveThru() const { return LiveThruPressure; }
/// Get the resulting register pressure over the traversed region.
/// This result is complete if either advance() or recede() has returned true,
/// or if closeRegion() was explicitly invoked.
RegisterPressure &getPressure() { return P; }
const RegisterPressure &getPressure() const { return P; }
/// Get the register set pressure at the current position, which may be less
/// than the pressure across the traversed region.
std::vector<unsigned> &getRegSetPressureAtPos() { return CurrSetPressure; }
void discoverLiveOut(unsigned Reg);
void discoverLiveIn(unsigned Reg);
bool isTopClosed() const;
bool isBottomClosed() const;
void closeTop();
void closeBottom();
/// Consider the pressure increase caused by traversing this instruction
/// bottom-up. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and record the number of
/// excess register units of that pressure set introduced by this instruction.
void getMaxUpwardPressureDelta(const MachineInstr *MI,
PressureDiff *PDiff,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit);
void getUpwardPressureDelta(const MachineInstr *MI,
/*const*/ PressureDiff &PDiff,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) const;
/// Consider the pressure increase caused by traversing this instruction
/// top-down. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and record the number of
/// excess register units of that pressure set introduced by this instruction.
void getMaxDownwardPressureDelta(const MachineInstr *MI,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit);
/// Find the pressure set with the most change beyond its pressure limit after
/// traversing this instruction either upward or downward depending on the
/// closed end of the current region.
void getMaxPressureDelta(const MachineInstr *MI,
RegPressureDelta &Delta,
ArrayRef<PressureChange> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) {
if (isTopClosed())
return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
MaxPressureLimit);
assert(isBottomClosed() && "Uninitialized pressure tracker");
return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
MaxPressureLimit);
}
/// Get the pressure of each PSet after traversing this instruction bottom-up.
void getUpwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult);
/// Get the pressure of each PSet after traversing this instruction top-down.
void getDownwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult);
void getPressureAfterInst(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult) {
if (isTopClosed())
return getUpwardPressure(MI, PressureResult, MaxPressureResult);
assert(isBottomClosed() && "Uninitialized pressure tracker");
return getDownwardPressure(MI, PressureResult, MaxPressureResult);
}
bool hasUntiedDef(unsigned VirtReg) const {
return UntiedDefs.count(VirtReg);
}
void dump() const;
protected:
const LiveRange *getLiveRange(unsigned Reg) const;
void increaseRegPressure(ArrayRef<unsigned> Regs);
void decreaseRegPressure(ArrayRef<unsigned> Regs);
void bumpUpwardPressure(const MachineInstr *MI);
void bumpDownwardPressure(const MachineInstr *MI);
};
void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
const TargetRegisterInfo *TRI);
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/BasicTTIImpl.h | //===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
namespace llvm {
extern cl::opt<unsigned> PartialUnrollingThreshold;
/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
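///
/// A minimal sketch of a conforming subclass (illustrative only; the
/// constructor and remaining members are omitted):
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     const TargetSubtargetInfo *ST;
///     const TargetLoweringBase *TLI;
///   public:
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///   };
/// \endcode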
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
typedef TargetTransformInfoImplCRTPBase<T> BaseT;
typedef TargetTransformInfo TTI;
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the result needs to be inserted and/or extracted from vectors.
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
assert(Ty->isVectorTy() && "Can only scalarize vectors");
unsigned Cost = 0;
for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
if (Insert)
Cost += static_cast<T *>(this)
->getVectorInstrCost(Instruction::InsertElement, Ty, i);
if (Extract)
Cost += static_cast<T *>(this)
->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
}
return Cost;
}
/// Estimate the cost overhead of SK_Alternate shuffle.
unsigned getAltShuffleOverhead(Type *Ty) {
assert(Ty->isVectorTy() && "Can only shuffle vectors");
unsigned Cost = 0;
// Shuffle cost is equal to the cost of extracting each element from its
// source vector plus the cost of inserting it into the result vector.
// e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts index 0 of the
// first vector, index 1 of the second vector, index 2 of the first
// vector and index 3 of the second vector, and inserts them at indices
// <0,1,2,3> of the result vector.
for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
Cost += static_cast<T *>(this)
->getVectorInstrCost(Instruction::InsertElement, Ty, i);
Cost += static_cast<T *>(this)
->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
}
return Cost;
}
/// \brief Local query method delegates up to T which *must* implement this!
const TargetSubtargetInfo *getST() const {
return static_cast<const T *>(this)->getST();
}
/// \brief Local query method delegates up to T which *must* implement this!
const TargetLoweringBase *getTLI() const {
return static_cast<const T *>(this)->getTLI();
}
protected:
explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
: BaseT(DL) {}
using TargetTransformInfoImplBase::DL;
public:
// Provide value semantics. MSVC requires that we spell all of these out.
BasicTTIImplBase(const BasicTTIImplBase &Arg)
: BaseT(static_cast<const BaseT &>(Arg)) {}
BasicTTIImplBase(BasicTTIImplBase &&Arg)
: BaseT(std::move(static_cast<BaseT &>(Arg))) {}
/// \name Scalar TTI Implementations
/// @{
bool hasBranchDivergence() { return false; }
bool isSourceOfDivergence(const Value *V) { return false; }
bool isLegalAddImmediate(int64_t imm) {
return getTLI()->isLegalAddImmediate(imm);
}
bool isLegalICmpImmediate(int64_t imm) {
return getTLI()->isLegalICmpImmediate(imm);
}
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) {
TargetLoweringBase::AddrMode AM;
AM.BaseGV = BaseGV;
AM.BaseOffs = BaseOffset;
AM.HasBaseReg = HasBaseReg;
AM.Scale = Scale;
return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);
}
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
TargetLoweringBase::AddrMode AM;
AM.BaseGV = BaseGV;
AM.BaseOffs = BaseOffset;
AM.HasBaseReg = HasBaseReg;
AM.Scale = Scale;
return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
}
bool isTruncateFree(Type *Ty1, Type *Ty2) {
return getTLI()->isTruncateFree(Ty1, Ty2);
}
bool isProfitableToHoist(Instruction *I) {
return getTLI()->isProfitableToHoist(I);
}
bool isTypeLegal(Type *Ty) {
EVT VT = getTLI()->getValueType(DL, Ty);
return getTLI()->isTypeLegal(VT);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) {
return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) {
if (IID == Intrinsic::cttz) {
if (getTLI()->isCheapToSpeculateCttz())
return TargetTransformInfo::TCC_Basic;
return TargetTransformInfo::TCC_Expensive;
}
if (IID == Intrinsic::ctlz) {
if (getTLI()->isCheapToSpeculateCtlz())
return TargetTransformInfo::TCC_Basic;
return TargetTransformInfo::TCC_Expensive;
}
return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
}
unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
bool shouldBuildLookupTables() {
const TargetLoweringBase *TLI = getTLI();
return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
}
bool haveFastSqrt(Type *Ty) {
const TargetLoweringBase *TLI = getTLI();
EVT VT = TLI->getValueType(DL, Ty);
return TLI->isTypeLegal(VT) &&
TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}
unsigned getFPOpCost(Type *Ty) {
// By default, FP instructions cost no more than integer ones, since they
// are implemented in HW. Target-specific TTI can override this.
return TargetTransformInfo::TCC_Basic;
}
unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
const TargetLoweringBase *TLI = getTLI();
switch (Opcode) {
default: break;
case Instruction::Trunc: {
if (TLI->isTruncateFree(OpTy, Ty))
return TargetTransformInfo::TCC_Free;
return TargetTransformInfo::TCC_Basic;
}
case Instruction::ZExt: {
if (TLI->isZExtFree(OpTy, Ty))
return TargetTransformInfo::TCC_Free;
return TargetTransformInfo::TCC_Basic;
}
}
return BaseT::getOperationCost(Opcode, Ty, OpTy);
}
void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
// This unrolling functionality is target independent, but to provide some
// motivation for its intended use, for x86:
// According to the Intel 64 and IA-32 Architectures Optimization Reference
// Manual, Intel Core models and later have a loop stream detector (and
// associated uop queue) that can benefit from partial unrolling.
// The relevant requirements are:
// - The loop must have no more than 4 (8 for Nehalem and later) branches
// taken, and none of them may be calls.
// - The loop can have no more than 18 (28 for Nehalem and later) uops.
// According to the Software Optimization Guide for AMD Family 15h
// Processors, models 30h-4fh (Steamroller and later) have a loop predictor
// and loop buffer which can benefit from partial unrolling.
// The relevant requirements are:
// - The loop must have fewer than 16 branches
// - The loop must have less than 40 uops in all executed loop branches
// The number of taken branches in a loop is hard to estimate here, and
// benchmarking has revealed that it is better not to be conservative when
// estimating the branch count. As a result, we'll ignore the branch limits
// until someone finds a case where it matters in practice.
unsigned MaxOps;
const TargetSubtargetInfo *ST = getST();
if (PartialUnrollingThreshold.getNumOccurrences() > 0)
MaxOps = PartialUnrollingThreshold;
else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
else
return;
// Scan the loop: don't unroll loops with calls.
for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
++I) {
BasicBlock *BB = *I;
for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
ImmutableCallSite CS(J);
if (const Function *F = CS.getCalledFunction()) {
if (!static_cast<T *>(this)->isLoweredToCall(F))
continue;
}
return;
}
}
// Enable runtime and partial unrolling up to the specified size.
UP.Partial = UP.Runtime = true;
UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}
/// @}
/// \name Vector TTI Implementations
/// @{
unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
unsigned getRegisterBitWidth(bool Vector) { return 32; }
unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
unsigned getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
// Check if any of the operands are vector operands.
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
// Assume that floating point arithmetic operations cost twice as much as
// integer operations.
unsigned OpCost = (IsFloat ? 2 : 1);
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
// The operation is legal. Assume it costs 1.
// If the type is split to multiple registers, assume that there is some
// overhead to this.
// TODO: Once we have extract/insert subvector cost we need to use them.
if (LT.first > 1)
return LT.first * 2 * OpCost;
return LT.first * 1 * OpCost;
}
if (!TLI->isOperationExpand(ISD, LT.second)) {
// If the operation is custom lowered then assume
// that the code is twice as expensive.
return LT.first * 2 * OpCost;
}
// Else, assume that we need to scalarize this op.
if (Ty->isVectorTy()) {
unsigned Num = Ty->getVectorNumElements();
unsigned Cost = static_cast<T *>(this)
->getArithmeticInstrCost(Opcode, Ty->getScalarType());
// Return the cost of multiple scalar invocations plus the cost of
// inserting and extracting the values.
return getScalarizationOverhead(Ty, true, true) + Num * Cost;
}
// We don't know anything about this scalar instruction.
return OpCost;
}
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
if (Kind == TTI::SK_Alternate) {
return getAltShuffleOverhead(Tp);
}
return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
// Check for NOOP conversions.
if (SrcLT.first == DstLT.first &&
SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
// Bitcast between types that are legalized to the same type are free.
if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
return 0;
}
if (Opcode == Instruction::Trunc &&
TLI->isTruncateFree(SrcLT.second, DstLT.second))
return 0;
if (Opcode == Instruction::ZExt &&
TLI->isZExtFree(SrcLT.second, DstLT.second))
return 0;
// If the cast is marked as legal (or promote) then assume low cost.
if (SrcLT.first == DstLT.first &&
TLI->isOperationLegalOrPromote(ISD, DstLT.second))
return 1;
// Handle scalar conversions.
if (!Src->isVectorTy() && !Dst->isVectorTy()) {
// Scalar bitcasts are usually free.
if (Opcode == Instruction::BitCast)
return 0;
// Just check the op cost. If the operation is legal then assume it costs
// 1.
if (!TLI->isOperationExpand(ISD, DstLT.second))
return 1;
// Assume that illegal scalar instructions are expensive.
return 4;
}
// Check vector-to-vector casts.
if (Dst->isVectorTy() && Src->isVectorTy()) {
// If the cast is between same-sized registers, then the check is simple.
if (SrcLT.first == DstLT.first &&
SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
// Assume that Zext is done using AND.
if (Opcode == Instruction::ZExt)
return 1;
// Assume that sext is done using SHL and SRA.
if (Opcode == Instruction::SExt)
return 2;
// Just check the op cost. If the operation is legal then assume it
// costs 1 and multiply by the type-legalization overhead.
if (!TLI->isOperationExpand(ISD, DstLT.second))
return SrcLT.first * 1;
}
// If we are converting vectors and the operation is illegal, or
// if the vectors are legalized to different types, estimate the
// scalarization costs.
unsigned Num = Dst->getVectorNumElements();
unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
Opcode, Dst->getScalarType(), Src->getScalarType());
// Return the cost of multiple scalar invocations plus the cost of
// inserting and extracting the values.
return getScalarizationOverhead(Dst, true, true) + Num * Cost;
}
// We already handled vector-to-vector and scalar-to-scalar conversions.
// This is where we handle bitcasts between vectors and scalars. We need
// to assume that the conversion is scalarized in one way or another.
if (Opcode == Instruction::BitCast)
// Illegal bitcasts are done by storing and loading from a stack slot.
return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
: 0) +
(Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
: 0);
llvm_unreachable("Unhandled cast");
}
unsigned getCFInstrCost(unsigned Opcode) {
// Branches are assumed to be predicted.
return 0;
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
// Selects on vectors are actually vector selects.
if (ISD == ISD::SELECT) {
assert(CondTy && "CondTy must exist");
if (CondTy->isVectorTy())
ISD = ISD::VSELECT;
}
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
!TLI->isOperationExpand(ISD, LT.second)) {
// The operation is legal. Assume it costs 1. Multiply
// by the type-legalization overhead.
return LT.first * 1;
}
// Otherwise, assume that the cast is scalarized.
if (ValTy->isVectorTy()) {
unsigned Num = ValTy->getVectorNumElements();
if (CondTy)
CondTy = CondTy->getScalarType();
unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
Opcode, ValTy->getScalarType(), CondTy);
// Return the cost of multiple scalar invocations plus the cost of
// inserting and extracting the values.
return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
}
// Unknown scalar opcode.
return 1;
}
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
std::pair<unsigned, MVT> LT =
getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
return LT.first;
}
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
assert(!Src->isVoidTy() && "Invalid type");
std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
// Assume that all loads of legal types cost 1.
unsigned Cost = LT.first;
if (Src->isVectorTy() &&
Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
// This is a vector load that legalizes to a larger type than the vector
// itself. Unless the corresponding extending load or truncating store is
// legal, then this will scalarize.
TargetLowering::LegalizeAction LA = TargetLowering::Expand;
EVT MemVT = getTLI()->getValueType(DL, Src, true);
if (MemVT.isSimple() && MemVT != MVT::Other) {
if (Opcode == Instruction::Store)
LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
else
LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
}
if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
// This is a vector load/store for some illegal type that is scalarized.
// We must account for the cost of building or decomposing the vector.
Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
Opcode == Instruction::Store);
}
}
return Cost;
}
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) {
VectorType *VT = dyn_cast<VectorType>(VecTy);
assert(VT && "Expect a vector type for interleaved memory op");
unsigned NumElts = VT->getNumElements();
assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
unsigned NumSubElts = NumElts / Factor;
VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
// Firstly, the cost of load/store operation.
unsigned Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);
// Then plus the cost of interleave operation.
if (Opcode == Instruction::Load) {
// The interleave cost is similar to extract sub vectors' elements
// from the wide vector, and insert them into sub vectors.
//
// E.g. An interleaved load of factor 2 (with one member of index 0):
// %vec = load <8 x i32>, <8 x i32>* %ptr
// %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
// The cost is estimated as extract elements at 0, 2, 4, 6 from the
// <8 x i32> vector and insert them into a <4 x i32> vector.
assert(Indices.size() <= Factor &&
"Interleaved memory op has too many members");
for (unsigned Index : Indices) {
assert(Index < Factor && "Invalid index for interleaved memory op");
// Extract elements from loaded vector for each sub vector.
for (unsigned i = 0; i < NumSubElts; i++)
Cost += getVectorInstrCost(Instruction::ExtractElement, VT,
Index + i * Factor);
}
unsigned InsSubCost = 0;
for (unsigned i = 0; i < NumSubElts; i++)
InsSubCost += getVectorInstrCost(Instruction::InsertElement, SubVT, i);
Cost += Indices.size() * InsSubCost;
} else {
// The interleave cost is extract all elements from sub vectors, and
// insert them into the wide vector.
//
// E.g. An interleaved store of factor 2:
// %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
// store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
// The cost is estimated as extract all elements from both <4 x i32>
// vectors and insert into the <8 x i32> vector.
unsigned ExtSubCost = 0;
for (unsigned i = 0; i < NumSubElts; i++)
ExtSubCost += getVectorInstrCost(Instruction::ExtractElement, SubVT, i);
Cost += Factor * ExtSubCost;
for (unsigned i = 0; i < NumElts; i++)
Cost += getVectorInstrCost(Instruction::InsertElement, VT, i);
}
return Cost;
}
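// Closed form under unit insert/extract costs (an illustrative sketch, not
// an API guarantee): with MemOpCost from getMemoryOpCost above, a load costs
//   MemOpCost + 2 * Indices.size() * NumSubElts
// and a store costs
//   MemOpCost + 2 * Factor * NumSubElts
// since NumElts == Factor * NumSubElts.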
unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> Tys) {
unsigned ISD = 0;
switch (IID) {
default: {
// Assume that we need to scalarize this intrinsic.
unsigned ScalarizationCost = 0;
unsigned ScalarCalls = 1;
Type *ScalarRetTy = RetTy;
if (RetTy->isVectorTy()) {
ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
ScalarRetTy = RetTy->getScalarType();
}
SmallVector<Type *, 4> ScalarTys;
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
Type *Ty = Tys[i];
if (Ty->isVectorTy()) {
ScalarizationCost += getScalarizationOverhead(Ty, false, true);
ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
Ty = Ty->getScalarType();
}
ScalarTys.push_back(Ty);
}
if (ScalarCalls == 1)
return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
IID, ScalarRetTy, ScalarTys);
return ScalarCalls * ScalarCost + ScalarizationCost;
}
// Look for intrinsics that can be lowered directly or turned into a scalar
// intrinsic call.
case Intrinsic::sqrt:
ISD = ISD::FSQRT;
break;
case Intrinsic::sin:
ISD = ISD::FSIN;
break;
case Intrinsic::cos:
ISD = ISD::FCOS;
break;
case Intrinsic::exp:
ISD = ISD::FEXP;
break;
case Intrinsic::exp2:
ISD = ISD::FEXP2;
break;
case Intrinsic::log:
ISD = ISD::FLOG;
break;
case Intrinsic::log10:
ISD = ISD::FLOG10;
break;
case Intrinsic::log2:
ISD = ISD::FLOG2;
break;
case Intrinsic::fabs:
ISD = ISD::FABS;
break;
case Intrinsic::minnum:
ISD = ISD::FMINNUM;
break;
case Intrinsic::maxnum:
ISD = ISD::FMAXNUM;
break;
case Intrinsic::copysign:
ISD = ISD::FCOPYSIGN;
break;
case Intrinsic::floor:
ISD = ISD::FFLOOR;
break;
case Intrinsic::ceil:
ISD = ISD::FCEIL;
break;
case Intrinsic::trunc:
ISD = ISD::FTRUNC;
break;
case Intrinsic::nearbyint:
ISD = ISD::FNEARBYINT;
break;
case Intrinsic::rint:
ISD = ISD::FRINT;
break;
case Intrinsic::round:
ISD = ISD::FROUND;
break;
case Intrinsic::pow:
ISD = ISD::FPOW;
break;
case Intrinsic::fma:
ISD = ISD::FMA;
break;
case Intrinsic::fmuladd:
ISD = ISD::FMA;
break;
// FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return 0;
case Intrinsic::masked_store:
return static_cast<T *>(this)
->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
case Intrinsic::masked_load:
return static_cast<T *>(this)
->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
}
const TargetLoweringBase *TLI = getTLI();
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
// The operation is legal. Assume it costs 1.
// If the type is split to multiple registers, assume that there is some
// overhead to this.
// TODO: Once we have extract/insert subvector cost we need to use them.
if (LT.first > 1)
return LT.first * 2;
return LT.first;
}
if (!TLI->isOperationExpand(ISD, LT.second)) {
// If the operation is custom lowered, then assume
// that the code is twice as expensive.
return LT.first * 2;
}
// If we can't lower fmuladd into an FMA, estimate the cost as a floating
// point mul followed by an add.
if (IID == Intrinsic::fmuladd)
return static_cast<T *>(this)
->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
static_cast<T *>(this)
->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
// Else, assume that we need to scalarize this intrinsic. For math builtins
// this will emit a costly libcall, adding call overhead and spills. Make it
// very expensive.
if (RetTy->isVectorTy()) {
unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
unsigned ScalarCalls = RetTy->getVectorNumElements();
SmallVector<Type *, 4> ScalarTys;
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
Type *Ty = Tys[i];
if (Ty->isVectorTy())
Ty = Ty->getScalarType();
ScalarTys.push_back(Ty);
}
unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
IID, RetTy->getScalarType(), ScalarTys);
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
if (Tys[i]->isVectorTy()) {
ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
}
}
return ScalarCalls * ScalarCost + ScalarizationCost;
}
// This is going to be turned into a library call, make it expensive.
return 10;
}
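// Worked example (a sketch assuming unit insert/extract costs): a
// <4 x float> llvm.sin with no legal or custom FSIN lowering scalarizes to
// 4 scalar calls at the library-call estimate of 10 each, plus 4 inserts
// for the result and 4 extracts for the operand, i.e. 4 * 10 + 8 = 48.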
/// \brief Compute a cost of the given call instruction.
///
/// Compute the cost of calling function F with return type RetTy and
/// argument types Tys. F might be nullptr, in this case the cost of an
/// arbitrary call with the specified signature will be returned.
/// This is used, for instance, when we estimate the cost of calling a vector
/// counterpart of the given function.
/// \param F Called function, might be nullptr.
/// \param RetTy Return value type.
/// \param Tys Argument types.
/// \returns The cost of Call instruction.
unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
return 10;
}
unsigned getNumberOfParts(Type *Tp) {
std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
return LT.first;
}
unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }
unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
assert(Ty->isVectorTy() && "Expect a vector type");
unsigned NumVecElts = Ty->getVectorNumElements();
unsigned NumReduxLevels = Log2_32(NumVecElts);
unsigned ArithCost =
NumReduxLevels *
static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
// Assume the pairwise shuffles add a cost.
unsigned ShuffleCost =
NumReduxLevels * (IsPairwise + 1) *
static_cast<T *>(this)
->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}
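// Example (a sketch assuming unit arithmetic and shuffle costs): reducing
// <8 x float> takes Log2_32(8) = 3 levels, so a non-pairwise reduction is
// estimated as 3 arithmetic ops + 3 shuffles plus the extract overhead from
// getScalarizationOverhead; a pairwise reduction doubles the shuffle term.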
/// @}
};
/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
friend class BasicTTIImplBase<BasicTTIImpl>;
const TargetSubtargetInfo *ST;
const TargetLoweringBase *TLI;
const TargetSubtargetInfo *getST() const { return ST; }
const TargetLoweringBase *getTLI() const { return TLI; }
public:
explicit BasicTTIImpl(const TargetMachine *ST, Function &F);
// Provide value semantics. MSVC requires that we spell all of these out.
BasicTTIImpl(const BasicTTIImpl &Arg)
: BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
BasicTTIImpl(BasicTTIImpl &&Arg)
: BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
TLI(std::move(Arg.TLI)) {}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/DwarfStringPoolEntry.h | //===- llvm/CodeGen/DwarfStringPoolEntry.h - String pool entry --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
#include "llvm/ADT/StringMap.h"
namespace llvm {
class MCSymbol;
/// Data for a string pool entry.
struct DwarfStringPoolEntry {
MCSymbol *Symbol;
unsigned Offset;
unsigned Index;
};
/// String pool entry reference.
struct DwarfStringPoolEntryRef {
const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
public:
DwarfStringPoolEntryRef() = default;
explicit DwarfStringPoolEntryRef(
const StringMapEntry<DwarfStringPoolEntry> &I)
: I(&I) {}
explicit operator bool() const { return I; }
MCSymbol *getSymbol() const {
assert(I->second.Symbol && "No symbol available!");
return I->second.Symbol;
}
unsigned getOffset() const { return I->second.Offset; }
unsigned getIndex() const { return I->second.Index; }
StringRef getString() const { return I->first(); }
bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
};
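/// Example usage (illustrative only; Entry stands in for a string pool's
/// StringMapEntry<DwarfStringPoolEntry>):
///
///   DwarfStringPoolEntryRef Ref(Entry);
///   if (Ref)
///     OS << Ref.getString() << " at offset " << Ref.getOffset();
///
/// A default-constructed reference is null and converts to false.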
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineFunctionInitializer.h | //===- MachineFunctionInitializer.h - machine function initializer --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares an interface that allows custom machine function
// initialization.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFUNCTIONINITIALIZER_H
#define LLVM_CODEGEN_MACHINEFUNCTIONINITIALIZER_H
namespace llvm {
class MachineFunction;
/// This interface provides a way to initialize machine functions after they are
/// created by the machine function analysis pass.
class MachineFunctionInitializer {
virtual void anchor();
public:
virtual ~MachineFunctionInitializer() {}
/// Initialize the machine function.
///
/// Return true if an error occurred.
virtual bool initializeMachineFunction(MachineFunction &MF) = 0;
};
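/// Example implementation (a hypothetical sketch, not provided by LLVM):
///
///   struct MIRInitializer : MachineFunctionInitializer {
///     bool initializeMachineFunction(MachineFunction &MF) override {
///       // Populate MF, e.g. from a serialized representation; returning
///       // true reports an error to the caller.
///       return false;
///     }
///   };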
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineDominanceFrontier.h | //===- llvm/CodeGen/MachineDominanceFrontier.h ------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
class MachineDominanceFrontier : public MachineFunctionPass {
ForwardDominanceFrontierBase<MachineBasicBlock> Base;
public:
typedef DominatorTreeBase<MachineBasicBlock> DomTreeT;
typedef DomTreeNodeBase<MachineBasicBlock> DomTreeNodeT;
typedef DominanceFrontierBase<MachineBasicBlock>::DomSetType DomSetType;
typedef DominanceFrontierBase<MachineBasicBlock>::iterator iterator;
typedef DominanceFrontierBase<MachineBasicBlock>::const_iterator const_iterator;
void operator=(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
static char ID;
MachineDominanceFrontier();
DominanceFrontierBase<MachineBasicBlock> &getBase() {
return Base;
}
inline const std::vector<MachineBasicBlock*> &getRoots() const {
return Base.getRoots();
}
MachineBasicBlock *getRoot() const {
return Base.getRoot();
}
bool isPostDominator() const {
return Base.isPostDominator();
}
iterator begin() {
return Base.begin();
}
const_iterator begin() const {
return Base.begin();
}
iterator end() {
return Base.end();
}
const_iterator end() const {
return Base.end();
}
iterator find(MachineBasicBlock *B) {
return Base.find(B);
}
const_iterator find(MachineBasicBlock *B) const {
return Base.find(B);
}
iterator addBasicBlock(MachineBasicBlock *BB, const DomSetType &frontier) {
return Base.addBasicBlock(BB, frontier);
}
void removeBlock(MachineBasicBlock *BB) {
return Base.removeBlock(BB);
}
void addToFrontier(iterator I, MachineBasicBlock *Node) {
return Base.addToFrontier(I, Node);
}
void removeFromFrontier(iterator I, MachineBasicBlock *Node) {
return Base.removeFromFrontier(I, Node);
}
bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
return Base.compareDomSet(DS1, DS2);
}
bool compare(DominanceFrontierBase<MachineBasicBlock> &Other) const {
return Base.compare(Other);
}
bool runOnMachineFunction(MachineFunction &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveVariables.h | //===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveVariables analysis pass. For each machine
// instruction in the function, this pass calculates the set of registers that
// are immediately dead after the instruction (i.e., the instruction calculates
// the value, but it is never used) and the set of registers that are used by
// the instruction, but are never used after the instruction (i.e., they are
// killed).
//
// This class computes live variables using a sparse implementation based on
// the machine code SSA form. This class computes live variable information for
// each virtual and _register allocatable_ physical register in a function. It
// uses the dominance properties of SSA form to efficiently compute live
// variables for virtual registers, and assumes that physical registers are only
// live within a single basic block (allowing it to do a single local analysis
// to resolve physical register lifetimes in each basic block). If a physical
// register is not register allocatable, it is not tracked. This is useful for
// things like the stack pointer and condition codes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
#define LLVM_CODEGEN_LIVEVARIABLES_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class MachineBasicBlock;
class MachineRegisterInfo;
class LiveVariables : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
LiveVariables() : MachineFunctionPass(ID) {
initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
}
/// VarInfo - This represents the regions where a virtual register is live in
/// the program. We represent this with three different pieces of
/// information: the set of blocks in which the instruction is live
/// throughout, the set of blocks in which the instruction is actually used,
/// and the set of non-phi instructions that are the last users of the value.
///
/// In the common case where a value is defined and killed in the same block,
/// there is one killing instruction, and AliveBlocks is empty.
///
/// Otherwise, the value is live out of the block. If the value is live
/// throughout any blocks, these blocks are listed in AliveBlocks. Blocks
/// where the liveness range ends are not included in AliveBlocks, instead
/// being captured by the Kills set. In these blocks, the value is live into
/// the block (unless the value is defined and killed in the same block) and
/// lives until the specified instruction. Note that there cannot ever be a
/// value whose Kills set contains two instructions from the same basic block.
///
/// PHI nodes complicate things a bit. If a PHI node is the last user of a
/// value in one of its predecessor blocks, it is not listed in the kills set,
/// but does include the predecessor block in the AliveBlocks set (unless that
/// block also defines the value). This leads to the (perfectly sensical)
/// situation where a value is defined in a block, and the last use is a phi
/// node in the successor. In this case, AliveBlocks is empty (the value is
/// not live across any blocks) and Kills is empty (phi nodes are not
/// included). This is sensical because the value must be live to the end of
/// the block, but is not live in any successor blocks.
struct VarInfo {
/// AliveBlocks - Set of blocks in which this value is alive completely
/// through. This is a bit set which uses the basic block number as an
/// index.
///
SparseBitVector<> AliveBlocks;
/// Kills - List of MachineInstruction's which are the last use of this
/// virtual register (kill it) in their basic block.
///
std::vector<MachineInstr*> Kills;
/// removeKill - Delete a kill corresponding to the specified
/// machine instruction. Returns true if there was a kill
/// corresponding to this instruction, false otherwise.
bool removeKill(MachineInstr *MI) {
std::vector<MachineInstr*>::iterator
I = std::find(Kills.begin(), Kills.end(), MI);
if (I == Kills.end())
return false;
Kills.erase(I);
return true;
}
/// findKill - Find a kill instruction in MBB. Return NULL if none is found.
MachineInstr *findKill(const MachineBasicBlock *MBB) const;
/// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
/// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
/// MBB, it is not considered live in.
bool isLiveIn(const MachineBasicBlock &MBB,
unsigned Reg,
MachineRegisterInfo &MRI);
void dump() const;
};
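/// Example (illustrative): for a virtual register defined in BB0, live
/// through BB1, and last used by a non-phi instruction in BB2:
///
///   VarInfo &VI = LV.getVarInfo(Reg);        // LV is a LiveVariables pass
///   VI.AliveBlocks.test(BB1->getNumber());   // true: live throughout BB1
///   VI.Kills;                                // holds the killing use in BB2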
private:
/// VirtRegInfo - This list is a mapping from virtual register number to
/// variable information.
///
IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
/// PHIJoins - list of virtual registers that are PHI joins. These registers
/// may have multiple definitions, and they require special handling when
/// building live intervals.
SparseBitVector<> PHIJoins;
private: // Intermediate data structures
MachineFunction *MF;
MachineRegisterInfo* MRI;
const TargetRegisterInfo *TRI;
// PhysRegDef - Keep track of which instruction was the last def of a
// physical register. This is a purely local property, because all physical
// register references are presumed dead across basic blocks.
std::vector<MachineInstr *> PhysRegDef;
// PhysRegUse - Keep track of which instruction was the last use of a
// physical register. This is a purely local property, because all physical
// register references are presumed dead across basic blocks.
std::vector<MachineInstr *> PhysRegUse;
std::vector<SmallVector<unsigned, 4>> PHIVarInfo;
// DistanceMap - Keep track of the distance of a MI from the start of the
// current basic block.
DenseMap<MachineInstr*, unsigned> DistanceMap;
/// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
/// uses. Pay special attention to the sub-register uses which may come below
/// the last use of the whole register.
bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
/// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
void HandleRegMask(const MachineOperand&);
void HandlePhysRegUse(unsigned Reg, MachineInstr *MI);
void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallVectorImpl<unsigned> &Defs);
void UpdatePhysRegDefs(MachineInstr *MI, SmallVectorImpl<unsigned> &Defs);
/// FindLastRefOrPartRef - Return the last reference or partial reference of
/// the specified register.
MachineInstr *FindLastRefOrPartRef(unsigned Reg);
/// FindLastPartialDef - Return the last partial def of the specified
/// register. Also returns the sub-registers that are defined by the
/// instruction.
MachineInstr *FindLastPartialDef(unsigned Reg,
SmallSet<unsigned,4> &PartDefRegs);
/// analyzePHINodes - Gather information about the PHI nodes in here. In
/// particular, we want to map the variable information of a virtual
/// register which is used in a PHI node. We map that to the BB the vreg
/// is coming from.
void analyzePHINodes(const MachineFunction& Fn);
void runOnInstr(MachineInstr *MI, SmallVectorImpl<unsigned> &Defs);
void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
public:
bool runOnMachineFunction(MachineFunction &MF) override;
/// RegisterDefIsDead - Return true if the specified instruction defines the
/// specified register, but that definition is dead.
bool RegisterDefIsDead(MachineInstr *MI, unsigned Reg) const;
//===--------------------------------------------------------------------===//
// API to update live variable information
/// replaceKillInstruction - Update register kill info by replacing a kill
/// instruction with a new one.
void replaceKillInstruction(unsigned Reg, MachineInstr *OldMI,
MachineInstr *NewMI);
/// addVirtualRegisterKilled - Add information about the fact that the
/// specified register is killed after being used by the specified
/// instruction. If AddIfNotFound is true, add an implicit operand if it's
/// not found.
void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
bool AddIfNotFound = false) {
if (MI->addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(MI);
}
/// removeVirtualRegisterKilled - Remove the specified kill of the virtual
/// register from the live variable information. Returns true if the
/// variable was marked as killed by the specified instruction,
/// false otherwise.
bool removeVirtualRegisterKilled(unsigned reg, MachineInstr *MI) {
if (!getVarInfo(reg).removeKill(MI))
return false;
bool Removed = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
MO.setIsKill(false);
Removed = true;
break;
}
}
assert(Removed && "Register is not used by this instruction!");
(void)Removed;
return true;
}
/// removeVirtualRegistersKilled - Remove all killed info for the specified
/// instruction.
void removeVirtualRegistersKilled(MachineInstr *MI);
/// addVirtualRegisterDead - Add information about the fact that the specified
/// register is dead after being used by the specified instruction. If
/// AddIfNotFound is true, add an implicit operand if it's not found.
void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI,
bool AddIfNotFound = false) {
if (MI->addRegisterDead(IncomingReg, TRI, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(MI);
}
/// removeVirtualRegisterDead - Remove the specified kill of the virtual
/// register from the live variable information. Returns true if the
/// variable was marked dead at the specified instruction, false
/// otherwise.
bool removeVirtualRegisterDead(unsigned reg, MachineInstr *MI) {
if (!getVarInfo(reg).removeKill(MI))
return false;
bool Removed = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
MO.setIsDead(false);
Removed = true;
break;
}
}
assert(Removed && "Register is not defined by this instruction!");
(void)Removed;
return true;
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override {
VirtRegInfo.clear();
}
/// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
/// register.
VarInfo &getVarInfo(unsigned RegIdx);
void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
MachineBasicBlock *BB);
void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
MachineBasicBlock *BB,
std::vector<MachineBasicBlock*> &WorkList);
void HandleVirtRegDef(unsigned reg, MachineInstr *MI);
void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
MachineInstr *MI);
bool isLiveIn(unsigned Reg, const MachineBasicBlock &MBB) {
return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
}
/// isLiveOut - Determine if Reg is live out from MBB, when not considering
/// PHI nodes. This means that Reg is either killed by a successor block or
/// passed through one.
bool isLiveOut(unsigned Reg, const MachineBasicBlock &MBB);
/// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
/// variables that are live out of DomBB and live into SuccBB will be marked
/// as passing live through BB. This method assumes that the machine code is
/// still in SSA form.
void addNewBlock(MachineBasicBlock *BB,
MachineBasicBlock *DomBB,
MachineBasicBlock *SuccBB);
/// isPHIJoin - Return true if Reg is a phi join register.
bool isPHIJoin(unsigned Reg) { return PHIJoins.test(Reg); }
/// setPHIJoin - Mark Reg as a phi join register.
void setPHIJoin(unsigned Reg) { PHIJoins.set(Reg); }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/TargetSchedule.h | //===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
class TargetRegisterInfo;
class TargetSubtargetInfo;
class TargetInstrInfo;
class MachineInstr;
/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
// For efficiency, hold a copy of the statically defined MCSchedModel for this
// processor.
MCSchedModel SchedModel;
InstrItineraryData InstrItins;
const TargetSubtargetInfo *STI;
const TargetInstrInfo *TII;
SmallVector<unsigned, 16> ResourceFactors;
unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
public:
TargetSchedModel(): SchedModel(MCSchedModel::GetDefaultSchedModel()), STI(nullptr), TII(nullptr) {}
/// \brief Initialize the machine model for instruction scheduling.
///
/// The machine model API keeps a copy of the top-level MCSchedModel table
/// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
/// dynamic properties.
void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
const TargetInstrInfo *tii);
/// Return the MCSchedClassDesc for this instruction.
const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
/// \brief TargetInstrInfo getter.
const TargetInstrInfo *getInstrInfo() const { return TII; }
/// \brief Return true if this machine model includes an instruction-level
/// scheduling model.
///
/// This is more detailed than the coarse-grained IssueWidth and default
/// latency properties, but separate from the per-cycle itinerary data.
bool hasInstrSchedModel() const;
const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
/// \brief Return true if this machine model includes cycle-to-cycle itinerary
/// data.
///
/// This models scheduling at each stage in the processor pipeline.
bool hasInstrItineraries() const;
const InstrItineraryData *getInstrItineraries() const {
if (hasInstrItineraries())
return &InstrItins;
return nullptr;
}
/// \brief Identify the processor corresponding to the current subtarget.
unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
/// \brief Maximum number of micro-ops that may be scheduled per cycle.
unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
/// \brief Return the number of issue slots required for this MI.
unsigned getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC = nullptr) const;
/// \brief Get the number of kinds of resources for this target.
unsigned getNumProcResourceKinds() const {
return SchedModel.getNumProcResourceKinds();
}
/// \brief Get a processor resource by ID for convenience.
const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
return SchedModel.getProcResource(PIdx);
}
#ifndef NDEBUG
const char *getResourceName(unsigned PIdx) const {
if (!PIdx)
return "MOps";
return SchedModel.getProcResource(PIdx)->Name;
}
#endif
typedef const MCWriteProcResEntry *ProcResIter;
/// \brief Get an iterator into the processor resources consumed by this
/// scheduling class.
ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
// The subtarget holds a single resource table for all processors.
return STI->getWriteProcResBegin(SC);
}
ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
return STI->getWriteProcResEnd(SC);
}
/// \brief Multiply the number of units consumed for a resource by this factor
/// to normalize it relative to other resources.
unsigned getResourceFactor(unsigned ResIdx) const {
return ResourceFactors[ResIdx];
}
/// \brief Multiply number of micro-ops by this factor to normalize it
/// relative to other resources.
unsigned getMicroOpFactor() const {
return MicroOpFactor;
}
/// \brief Multiply cycle count by this factor to normalize it relative to
/// other resources. This is the number of resource units per cycle.
unsigned getLatencyFactor() const {
return ResourceLCM;
}
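/// Worked example (illustrative, ignoring the micro-op factor): if resource
/// A provides 2 units per cycle and resource B provides 3, the LCM is 6, so
/// A's factor is 3 and B's is 2. Consuming 4 units of A (4 * 3 = 12) can
/// then be compared directly against 5 units of B (5 * 2 = 10) or against
/// cycle counts scaled by getLatencyFactor().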
/// \brief Number of micro-ops that may be buffered for OOO execution.
unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
/// \brief Number of resource units that may be buffered for OOO execution.
/// \return The buffer size in resource units or -1 for unlimited.
int getResourceBufferSize(unsigned PIdx) const {
return SchedModel.getProcResource(PIdx)->BufferSize;
}
/// \brief Compute operand latency based on the available machine model.
///
/// Compute and return the latency of the given data dependent def and use
/// when the operand indices are already known. UseMI may be NULL for an
/// unknown user.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *UseMI, unsigned UseOperIdx)
const;
/// \brief Compute the instruction latency based on the available machine
/// model.
///
/// Compute and return the expected latency of this instruction independent of
/// a particular use. computeOperandLatency is the preferred API, but this is
/// occasionally useful to help estimate instruction cost.
///
/// If UseDefaultDefLatency is false and no new machine sched model is
/// present this method falls back to TII->getInstrLatency with an empty
/// instruction itinerary (this is so we preserve the previous behavior of the
/// if converter after moving it to TargetSchedModel).
unsigned computeInstrLatency(const MachineInstr *MI,
bool UseDefaultDefLatency = true) const;
unsigned computeInstrLatency(unsigned Opcode) const;
/// \brief Output dependency latency of a pair of defs of the same register.
///
/// This is typically one cycle.
unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *DepMI) const;
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/IntrinsicLowering.h | //===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the IntrinsicLowering interface. This interface allows
// addition of domain-specific or front-end specific intrinsics to LLVM without
// having to modify all of the C backend or interpreter.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
#define LLVM_CODEGEN_INTRINSICLOWERING_H
#include "llvm/IR/Intrinsics.h"
namespace llvm {
class CallInst;
class Module;
class DataLayout;
class IntrinsicLowering {
const DataLayout& DL;
bool Warned;
public:
explicit IntrinsicLowering(const DataLayout &DL) :
DL(DL), Warned(false) {}
/// AddPrototypes - This method, if called, causes all of the prototypes
/// that might be needed by an intrinsic lowering implementation to be
/// inserted into the module specified.
void AddPrototypes(Module &M);
/// LowerIntrinsicCall - This method replaces a call with the LLVM function
/// which should be used to implement the specified intrinsic function call.
/// If an intrinsic function must be implemented by the code generator
/// (such as va_start), this function should print a message and abort.
///
/// Otherwise, if an intrinsic function call can be lowered, the code to
/// implement it (often a call to a non-intrinsic function) is inserted
/// _after_ the call instruction and the call is deleted. The caller must
/// be capable of handling this kind of change.
///
void LowerIntrinsicCall(CallInst *CI);
/// LowerToByteSwap - Replace a call instruction with a call to the bswap
/// intrinsic. Return false if it has determined the call is not a
/// simple integer bswap.
static bool LowerToByteSwap(CallInst *CI);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineScheduler.h | //==- MachineScheduler.h - MachineInstr Scheduling Pass ----------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for customizing the standard MachineScheduler
// pass. Note that the entire pass may be replaced as follows:
//
// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
// PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
// ...}
//
// The MachineScheduler pass is only responsible for choosing the regions to be
// scheduled. Targets can override the DAG builder and scheduler without
// replacing the pass as follows:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
// return new CustomMachineScheduler(C);
// }
//
// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
// scheduling while updating the instruction stream, register pressure, and live
// intervals. Most targets don't need to override the DAG builder and list
// scheduler, but subtargets that require custom scheduling heuristics may
// plug in an alternate MachineSchedStrategy. The strategy is responsible for
// selecting the highest priority node from the list:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
// return new ScheduleDAGMI(C, CustomStrategy(C));
// }
//
// The DAG builder can also be customized in a sense by adding DAG mutations
// that will run after DAG building and before list scheduling. DAG mutations
// can adjust dependencies based on target-specific knowledge or add weak edges
// to aid heuristics:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
// ScheduleDAGMI *DAG = new ScheduleDAGMI(C, CustomStrategy(C));
// DAG->addMutation(new CustomDependencies(DAG->TII, DAG->TRI));
// return DAG;
// }
//
// A target that supports alternative schedulers can use the
// MachineSchedRegistry to allow command line selection. This can be done by
// implementing the following boilerplate:
//
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
// return new CustomMachineScheduler(C);
// }
// static MachineSchedRegistry
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
// createCustomMachineSched);
//
//
// Finally, subtargets that don't need to implement custom heuristics but would
// like to configure the GenericScheduler's policy for a given scheduler region,
// including scheduling direction and register pressure tracking policy, can do
// this:
//
// void <SubTarget>Subtarget::
// overrideSchedPolicy(MachineSchedPolicy &Policy,
// MachineInstr *begin,
// MachineInstr *end,
// unsigned NumRegionInstrs) const {
// Policy.<Flag> = true;
// }
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include <memory>
namespace llvm {
extern cl::opt<bool> ForceTopDown;
extern cl::opt<bool> ForceBottomUp;
class AliasAnalysis;
class LiveIntervals;
class MachineDominatorTree;
class MachineLoopInfo;
class RegisterClassInfo;
class ScheduleDAGInstrs;
class SchedDFSResult;
class ScheduleHazardRecognizer;
/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
struct MachineSchedContext {
MachineFunction *MF;
const MachineLoopInfo *MLI;
const MachineDominatorTree *MDT;
const TargetPassConfig *PassConfig;
AliasAnalysis *AA;
LiveIntervals *LIS;
RegisterClassInfo *RegClassInfo;
MachineSchedContext();
virtual ~MachineSchedContext();
};
/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
class MachineSchedRegistry : public MachinePassRegistryNode {
public:
typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
// RegisterPassParser requires a (misnamed) FunctionPassCtor type.
typedef ScheduleDAGCtor FunctionPassCtor;
static MachinePassRegistry Registry;
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
: MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
Registry.Add(this);
}
~MachineSchedRegistry() { Registry.Remove(this); }
// Accessors.
//
MachineSchedRegistry *getNext() const {
return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
}
static MachineSchedRegistry *getList() {
return (MachineSchedRegistry *)Registry.getList();
}
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}
};
class ScheduleDAGMI;
/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
// Allow the scheduler to disable register pressure tracking.
bool ShouldTrackPressure;
// Allow the scheduler to force top-down or bottom-up scheduling. If neither
// is true, the scheduler runs in both directions and converges.
bool OnlyTopDown;
bool OnlyBottomUp;
MachineSchedPolicy(): ShouldTrackPressure(false), OnlyTopDown(false),
OnlyBottomUp(false) {}
};
/// MachineSchedStrategy - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Initialization sequence:
/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
virtual void anchor();
public:
virtual ~MachineSchedStrategy() {}
/// Optionally override the per-region scheduling policy.
virtual void initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) {}
/// Check if pressure tracking is needed before building the DAG and
/// initializing this strategy. Called after initPolicy.
virtual bool shouldTrackPressure() const { return true; }
/// Initialize the strategy after building the DAG for a new region.
virtual void initialize(ScheduleDAGMI *DAG) = 0;
/// Notify this strategy that all roots have been released (including those
/// that depend on EntrySU or ExitSU).
virtual void registerRoots() {}
/// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
/// schedule the node at the top of the unscheduled region. Otherwise it will
/// be scheduled at the bottom.
virtual SUnit *pickNode(bool &IsTopNode) = 0;
/// \brief Scheduler callback to notify that a new subtree is scheduled.
virtual void scheduleTree(unsigned SubtreeID) {}
/// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
/// instruction and updated scheduled/remaining flags in the DAG nodes.
virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
/// When all predecessor dependencies have been resolved, free this node for
/// top-down scheduling.
virtual void releaseTopNode(SUnit *SU) = 0;
/// When all successor dependencies have been resolved, free this node for
/// bottom-up scheduling.
virtual void releaseBottomNode(SUnit *SU) = 0;
};
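/// A minimal strategy sketch (hypothetical; real strategies keep separate
/// ready queues and apply heuristics in pickNode):
///
///   struct InOrderStrategy : MachineSchedStrategy {
///     std::vector<SUnit *> Ready;
///     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
///     SUnit *pickNode(bool &IsTopNode) override {
///       if (Ready.empty()) return nullptr;
///       IsTopNode = true;
///       SUnit *SU = Ready.back();
///       Ready.pop_back();
///       return SU;
///     }
///     void schedNode(SUnit *SU, bool IsTopNode) override {}
///     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
///     void releaseBottomNode(SUnit *SU) override {}
///   };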
/// Mutate the DAG as a postpass after normal DAG building.
class ScheduleDAGMutation {
virtual void anchor();
public:
virtual ~ScheduleDAGMutation() {}
virtual void apply(ScheduleDAGMI *DAG) = 0;
};
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
AliasAnalysis *AA;
std::unique_ptr<MachineSchedStrategy> SchedImpl;
/// Topo - A topological ordering for SUnits which permits fast IsReachable
/// and similar queries.
ScheduleDAGTopologicalSort Topo;
/// Ordered list of DAG postprocessing steps.
std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
/// The top of the unscheduled zone.
MachineBasicBlock::iterator CurrentTop;
/// The bottom of the unscheduled zone.
MachineBasicBlock::iterator CurrentBottom;
/// Record the next node in a scheduled cluster.
const SUnit *NextClusterPred;
const SUnit *NextClusterSucc;
#ifndef NDEBUG
/// The number of instructions scheduled so far. Used to cut off the
/// scheduler at the point determined by misched-cutoff.
unsigned NumInstrsScheduled;
#endif
public:
ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
bool IsPostRA)
: ScheduleDAGInstrs(*C->MF, C->MLI, IsPostRA,
/*RemoveKillFlags=*/IsPostRA, C->LIS),
AA(C->AA), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU), CurrentTop(),
CurrentBottom(), NextClusterPred(nullptr), NextClusterSucc(nullptr) {
#ifndef NDEBUG
NumInstrsScheduled = 0;
#endif
}
// Provide a vtable anchor
~ScheduleDAGMI() override;
/// Return true if this DAG supports VReg liveness and RegPressure.
virtual bool hasVRegLiveness() const { return false; }
/// Add a postprocessing step to the DAG builder.
/// Mutations are applied in the order that they are added after normal DAG
/// building and before MachineSchedStrategy initialization.
///
/// ScheduleDAGMI takes ownership of the Mutation object.
void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
Mutations.push_back(std::move(Mutation));
}
/// \brief True if an edge can be added from PredSU to SuccSU without creating
/// a cycle.
bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
/// \brief Add a DAG edge to the given SU with the given predecessor
/// dependence data.
///
/// \returns true if the edge may be added without creating a cycle OR if an
/// equivalent edge already existed (false indicates failure).
bool addEdge(SUnit *SuccSU, const SDep &PredDep);
MachineBasicBlock::iterator top() const { return CurrentTop; }
MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
/// Implement the ScheduleDAGInstrs interface for handling the next scheduling
/// region. This covers all instructions in a block, while schedule() may only
/// cover a subset.
void enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs) override;
/// Implement ScheduleDAGInstrs interface for scheduling a sequence of
/// reorderable instructions.
void schedule() override;
/// Change the position of an instruction within the basic block and update
/// live ranges and region boundary iterators.
void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
const SUnit *getNextClusterPred() const { return NextClusterPred; }
const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
void viewGraph(const Twine &Name, const Twine &Title) override;
void viewGraph() override;
protected:
// Top-Level entry points for the schedule() driver...
/// Apply each ScheduleDAGMutation step in order. This allows different
/// instances of ScheduleDAGMI to perform custom DAG postprocessing.
void postprocessDAG();
/// Release ExitSU predecessors and setup scheduler queues.
void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
/// Update scheduler DAG and queues after scheduling an instruction.
void updateQueues(SUnit *SU, bool IsTopNode);
/// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
void placeDebugValues();
/// \brief dump the scheduled Sequence.
void dumpSchedule() const;
// Lesser helpers...
bool checkSchedLimit();
void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
SmallVectorImpl<SUnit*> &BotRoots);
void releaseSucc(SUnit *SU, SDep *SuccEdge);
void releaseSuccessors(SUnit *SU);
void releasePred(SUnit *SU, SDep *PredEdge);
void releasePredecessors(SUnit *SU);
};
/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
RegisterClassInfo *RegClassInfo;
/// Information about DAG subtrees. If DFSResult is NULL, then ScheduledTrees
/// will be empty.
SchedDFSResult *DFSResult;
BitVector ScheduledTrees;
MachineBasicBlock::iterator LiveRegionEnd;
// Map each SU to its summary of pressure changes. This array is updated for
// liveness during bottom-up scheduling. Top-down scheduling may proceed but
// has no effect on the pressure diffs.
PressureDiffs SUPressureDiffs;
/// Register pressure in this region computed by initRegPressure.
bool ShouldTrackPressure;
IntervalPressure RegPressure;
RegPressureTracker RPTracker;
/// List of pressure sets that exceed the target's pressure limit before
/// scheduling, listed in increasing set ID order. Each pressure set is paired
/// with its max pressure in the currently scheduled regions.
std::vector<PressureChange> RegionCriticalPSets;
/// The top of the unscheduled zone.
IntervalPressure TopPressure;
RegPressureTracker TopRPTracker;
/// The bottom of the unscheduled zone.
IntervalPressure BotPressure;
RegPressureTracker BotRPTracker;
public:
ScheduleDAGMILive(MachineSchedContext *C,
std::unique_ptr<MachineSchedStrategy> S)
: ScheduleDAGMI(C, std::move(S), /*IsPostRA=*/false),
RegClassInfo(C->RegClassInfo), DFSResult(nullptr),
ShouldTrackPressure(false), RPTracker(RegPressure),
TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
~ScheduleDAGMILive() override;
/// Return true if this DAG supports VReg liveness and RegPressure.
bool hasVRegLiveness() const override { return true; }
/// \brief Return true if register pressure tracking is enabled.
bool isTrackingPressure() const { return ShouldTrackPressure; }
/// Get current register pressure for the top scheduled instructions.
const IntervalPressure &getTopPressure() const { return TopPressure; }
const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
/// Get current register pressure for the bottom scheduled instructions.
const IntervalPressure &getBotPressure() const { return BotPressure; }
const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
/// Get register pressure for the entire scheduling region before scheduling.
const IntervalPressure &getRegPressure() const { return RegPressure; }
const std::vector<PressureChange> &getRegionCriticalPSets() const {
return RegionCriticalPSets;
}
PressureDiff &getPressureDiff(const SUnit *SU) {
return SUPressureDiffs[SU->NodeNum];
}
/// Compute a DFSResult after DAG building is complete, and before any
/// queue comparisons.
void computeDFSResult();
/// Return a non-null DFS result if the scheduling strategy initialized it.
const SchedDFSResult *getDFSResult() const { return DFSResult; }
BitVector &getScheduledTrees() { return ScheduledTrees; }
/// Implement the ScheduleDAGInstrs interface for handling the next scheduling
/// region. This covers all instructions in a block, while schedule() may only
/// cover a subset.
void enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs) override;
/// Implement ScheduleDAGInstrs interface for scheduling a sequence of
/// reorderable instructions.
void schedule() override;
/// Compute the cyclic critical path through the DAG.
unsigned computeCyclicCriticalPath();
protected:
// Top-Level entry points for the schedule() driver...
/// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
/// enabled. This sets up three trackers. RPTracker will cover the entire DAG
/// region, TopTracker and BottomTracker will be initialized to the top and
/// bottom of the DAG region without covering any unscheduled instruction.
void buildDAGWithRegPressure();
/// Move an instruction and update register pressure.
void scheduleMI(SUnit *SU, bool IsTopNode);
// Lesser helpers...
void initRegPressure();
void updatePressureDiffs(ArrayRef<unsigned> LiveUses);
void updateScheduledPressure(const SUnit *SU,
const std::vector<unsigned> &NewMaxPressure);
};
//===----------------------------------------------------------------------===//
///
/// Helpers for implementing custom MachineSchedStrategy classes. These take
/// care of the book-keeping associated with list scheduling heuristics.
///
//===----------------------------------------------------------------------===//
/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
///
/// This is a convenience class that may be used by implementations of
/// MachineSchedStrategy.
class ReadyQueue {
unsigned ID;
std::string Name;
std::vector<SUnit*> Queue;
public:
ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
unsigned getID() const { return ID; }
StringRef getName() const { return Name; }
// SU is in this queue if its NodeQueueId is a superset of this ID.
bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
bool empty() const { return Queue.empty(); }
void clear() { Queue.clear(); }
unsigned size() const { return Queue.size(); }
typedef std::vector<SUnit*>::iterator iterator;
iterator begin() { return Queue.begin(); }
iterator end() { return Queue.end(); }
ArrayRef<SUnit*> elements() { return Queue; }
iterator find(SUnit *SU) {
return std::find(Queue.begin(), Queue.end(), SU);
}
void push(SUnit *SU) {
Queue.push_back(SU);
SU->NodeQueueId |= ID;
}
iterator remove(iterator I) {
(*I)->NodeQueueId &= ~ID;
*I = Queue.back();
unsigned idx = I - Queue.begin();
Queue.pop_back();
return Queue.begin() + idx;
}
void dump();
};
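/// Note that remove() swaps the last element into the erased slot, so it is
/// O(1) but does not preserve queue order. Illustrative use (SU is a
/// hypothetical unit already pushed onto Q):
///
///   ReadyQueue::iterator I = Q.find(SU);
///   if (I != Q.end())
///     I = Q.remove(I); // returns an iterator at the same index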
/// Summarize the unscheduled region.
struct SchedRemainder {
// Critical path through the DAG in expected latency.
unsigned CriticalPath;
unsigned CyclicCritPath;
// Scaled count of micro-ops left to schedule.
unsigned RemIssueCount;
bool IsAcyclicLatencyLimited;
// Unscheduled resources
SmallVector<unsigned, 16> RemainingCounts;
void reset() {
CriticalPath = 0;
CyclicCritPath = 0;
RemIssueCount = 0;
IsAcyclicLatencyLimited = false;
RemainingCounts.clear();
}
SchedRemainder() { reset(); }
void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};
/// Each Scheduling boundary is associated with ready queues. It tracks the
/// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
class SchedBoundary {
public:
/// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
enum {
TopQID = 1,
BotQID = 2,
LogMaxQID = 2
};
ScheduleDAGMI *DAG;
const TargetSchedModel *SchedModel;
SchedRemainder *Rem;
ReadyQueue Available;
ReadyQueue Pending;
ScheduleHazardRecognizer *HazardRec;
private:
/// True if the pending Q should be checked/updated before scheduling another
/// instruction.
bool CheckPending;
// For heuristics, keep a list of the nodes that immediately depend on the
// most recently scheduled node.
SmallPtrSet<const SUnit*, 8> NextSUs;
/// Number of cycles it takes to issue the instructions scheduled in this
/// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
/// See getStalls().
unsigned CurrCycle;
/// Micro-ops issued in the current cycle
unsigned CurrMOps;
/// MinReadyCycle - Cycle of the soonest available instruction.
unsigned MinReadyCycle;
// The expected latency of the critical path in this scheduled zone.
unsigned ExpectedLatency;
// The latency of dependence chains leading into this zone.
// For each node scheduled bottom-up: DLat = max DLat, N.Depth.
// For each cycle scheduled: DLat -= 1.
unsigned DependentLatency;
/// Count the scheduled (issued) micro-ops that can be retired by
/// time=CurrCycle assuming the first scheduled instr is retired at time=0.
unsigned RetiredMOps;
// Count scheduled resources that have been executed. Resources are
// considered executed if they become ready in the time that it takes to
// saturate any resource including the one in question. Counts are scaled
// for direct comparison with other resources. Counts can be compared with
// MOps * getMicroOpFactor and Latency * getLatencyFactor.
SmallVector<unsigned, 16> ExecutedResCounts;
/// Cache the max count for a single resource.
unsigned MaxExecutedResCount;
// Cache the critical resources ID in this scheduled zone.
unsigned ZoneCritResIdx;
// Whether the scheduled region is resource limited or latency limited.
bool IsResourceLimited;
// Record the highest cycle at which each resource has been reserved by a
// scheduled instruction.
SmallVector<unsigned, 16> ReservedCycles;
#ifndef NDEBUG
// Remember the greatest possible stall as an upper bound on the number of
// times we should retry the pending queue because of a hazard.
unsigned MaxObservedStall;
#endif
public:
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
SchedBoundary(unsigned ID, const Twine &Name):
DAG(nullptr), SchedModel(nullptr), Rem(nullptr), Available(ID, Name+".A"),
Pending(ID << LogMaxQID, Name+".P"),
HazardRec(nullptr) {
reset();
}
~SchedBoundary();
void reset();
void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
SchedRemainder *rem);
bool isTop() const {
return Available.getID() == TopQID;
}
/// Number of cycles to issue the instructions scheduled in this zone.
unsigned getCurrCycle() const { return CurrCycle; }
/// Micro-ops issued in the current cycle
unsigned getCurrMOps() const { return CurrMOps; }
/// Return true if the given SU is used by the most recently scheduled
/// instruction.
bool isNextSU(const SUnit *SU) const { return NextSUs.count(SU); }
// The latency of dependence chains leading into this zone.
unsigned getDependentLatency() const { return DependentLatency; }
/// Get the number of latency cycles "covered" by the scheduled
/// instructions. This is the larger of the critical path within the zone
/// and the number of cycles required to issue the instructions.
unsigned getScheduledLatency() const {
return std::max(ExpectedLatency, CurrCycle);
}
unsigned getUnscheduledLatency(SUnit *SU) const {
return isTop() ? SU->getHeight() : SU->getDepth();
}
unsigned getResourceCount(unsigned ResIdx) const {
return ExecutedResCounts[ResIdx];
}
/// Get the scaled count of scheduled micro-ops and resources, including
/// executed resources.
unsigned getCriticalCount() const {
if (!ZoneCritResIdx)
return RetiredMOps * SchedModel->getMicroOpFactor();
return getResourceCount(ZoneCritResIdx);
}
/// Get a scaled count for the minimum execution time of the scheduled
/// micro-ops that are ready to execute by getExecutedCount. Notice the
/// feedback loop.
unsigned getExecutedCount() const {
return std::max(CurrCycle * SchedModel->getLatencyFactor(),
MaxExecutedResCount);
}
unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
// Whether the scheduled region is resource limited or latency limited.
bool isResourceLimited() const { return IsResourceLimited; }
/// Get the difference between the given SUnit's ready time and the current
/// cycle.
unsigned getLatencyStallCycles(SUnit *SU);
unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
bool checkHazard(SUnit *SU);
unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
unsigned getOtherResourceCount(unsigned &OtherCritIdx);
void releaseNode(SUnit *SU, unsigned ReadyCycle);
void releaseTopNode(SUnit *SU);
void releaseBottomNode(SUnit *SU);
void bumpCycle(unsigned NextCycle);
void incExecutedResources(unsigned PIdx, unsigned Count);
unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
void bumpNode(SUnit *SU);
void releasePending();
void removeReady(SUnit *SU);
/// Call this before applying any other heuristics to the Available queue.
/// Updates the Available/Pending Q's if necessary and returns the single
/// available instruction, or NULL if there are multiple candidates.
SUnit *pickOnlyChoice();
#ifndef NDEBUG
void dumpScheduledState();
#endif
};
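// Illustrative sketch of how a strategy might query a zone (Zone is an
// assumed, already-initialized SchedBoundary; not part of this header):
//
//   SUnit *SU = Zone.pickOnlyChoice();
//   if (!SU) {
//     // Several candidates remain; consult the zone state before applying
//     // heuristics.
//     if (Zone.isResourceLimited())
//       /* prefer reducing the resource at Zone.getZoneCritResIdx() */;
//     else
//       /* prefer reducing latency, see Zone.getScheduledLatency() */;
//   }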
/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on TargetSchedModel, making it easy to
/// implement heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
/// Represent the type of SchedCandidate found within a single queue.
/// pickNodeBidirectional depends on these listed by decreasing priority.
enum CandReason {
NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
#ifndef NDEBUG
static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif
/// Policy for scheduling the next instruction in the candidate's zone.
struct CandPolicy {
bool ReduceLatency;
unsigned ReduceResIdx;
unsigned DemandResIdx;
CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
};
/// Status of an instruction's critical resource consumption.
struct SchedResourceDelta {
// Count critical resources in the scheduled region required by SU.
unsigned CritResources;
// Count critical resources from another region consumed by SU.
unsigned DemandedResources;
SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
bool operator==(const SchedResourceDelta &RHS) const {
return CritResources == RHS.CritResources
&& DemandedResources == RHS.DemandedResources;
}
bool operator!=(const SchedResourceDelta &RHS) const {
return !operator==(RHS);
}
};
/// Store the state used by GenericScheduler heuristics, required for the
/// lifetime of one invocation of pickNode().
struct SchedCandidate {
CandPolicy Policy;
// The best SUnit candidate.
SUnit *SU;
// The reason for this candidate.
CandReason Reason;
// Set of reasons that apply to multiple candidates.
uint32_t RepeatReasonSet;
// Register pressure values for the best candidate.
RegPressureDelta RPDelta;
// Critical resource consumption of the best candidate.
SchedResourceDelta ResDelta;
SchedCandidate(const CandPolicy &policy)
: Policy(policy), SU(nullptr), Reason(NoCand), RepeatReasonSet(0) {}
bool isValid() const { return SU; }
// Copy the status of another candidate without changing policy.
void setBest(SchedCandidate &Best) {
assert(Best.Reason != NoCand && "uninitialized Sched candidate");
SU = Best.SU;
Reason = Best.Reason;
RPDelta = Best.RPDelta;
ResDelta = Best.ResDelta;
}
bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
void initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel);
};
protected:
const MachineSchedContext *Context;
const TargetSchedModel *SchedModel;
const TargetRegisterInfo *TRI;
SchedRemainder Rem;
protected:
GenericSchedulerBase(const MachineSchedContext *C):
Context(C), SchedModel(nullptr), TRI(nullptr) {}
void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
SchedBoundary *OtherZone);
#ifndef NDEBUG
void traceCandidate(const SchedCandidate &Cand);
#endif
};
/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
ScheduleDAGMILive *DAG;
// State of the top and bottom scheduled instruction boundaries.
SchedBoundary Top;
SchedBoundary Bot;
MachineSchedPolicy RegionPolicy;
public:
GenericScheduler(const MachineSchedContext *C):
GenericSchedulerBase(C), DAG(nullptr), Top(SchedBoundary::TopQID, "TopQ"),
Bot(SchedBoundary::BotQID, "BotQ") {}
void initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) override;
bool shouldTrackPressure() const override {
return RegionPolicy.ShouldTrackPressure;
}
void initialize(ScheduleDAGMI *dag) override;
SUnit *pickNode(bool &IsTopNode) override;
void schedNode(SUnit *SU, bool IsTopNode) override;
void releaseTopNode(SUnit *SU) override {
Top.releaseTopNode(SU);
}
void releaseBottomNode(SUnit *SU) override {
Bot.releaseBottomNode(SU);
}
void registerRoots() override;
protected:
void checkAcyclicLatency();
void tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand,
SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
RegPressureTracker &TempTracker);
SUnit *pickNodeBidirectional(bool &IsTopNode);
void pickNodeFromQueue(SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
SchedCandidate &Candidate);
void reschedulePhysRegCopies(SUnit *SU, bool isTop);
};
/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
ScheduleDAGMI *DAG;
SchedBoundary Top;
SmallVector<SUnit*, 8> BotRoots;
public:
PostGenericScheduler(const MachineSchedContext *C):
GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
~PostGenericScheduler() override {}
void initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) override {
/* no configurable policy */
}
/// PostRA scheduling does not track pressure.
bool shouldTrackPressure() const override { return false; }
void initialize(ScheduleDAGMI *Dag) override;
void registerRoots() override;
SUnit *pickNode(bool &IsTopNode) override;
void scheduleTree(unsigned SubtreeID) override {
llvm_unreachable("PostRA scheduler does not support subtree analysis.");
}
void schedNode(SUnit *SU, bool IsTopNode) override;
void releaseTopNode(SUnit *SU) override {
Top.releaseTopNode(SU);
}
// Only called for roots.
void releaseBottomNode(SUnit *SU) override {
BotRoots.push_back(SU);
}
protected:
void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
void pickNodeFromQueue(SchedCandidate &Cand);
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineValueType.h | //===- CodeGen/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of machine-level target independent types which
// legal values in the code generator use.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEVALUETYPE_H
#define LLVM_CODEGEN_MACHINEVALUETYPE_H
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
namespace llvm {
class Type;
/// MVT - Machine Value Type. Every type that is supported natively by some
/// processor targeted by LLVM occurs here. This means that any legal value
/// type can be represented by an MVT.
class MVT {
public:
enum SimpleValueType {
// INVALID_SIMPLE_VALUE_TYPE - Simple value types less than zero are
// considered extended value types.
INVALID_SIMPLE_VALUE_TYPE = -1,
// If you change this numbering, you must change the values in
// ValueTypes.td as well!
Other = 0, // This is a non-standard value
i1 = 1, // This is a 1 bit integer value
i8 = 2, // This is an 8 bit integer value
i16 = 3, // This is a 16 bit integer value
i32 = 4, // This is a 32 bit integer value
i64 = 5, // This is a 64 bit integer value
i128 = 6, // This is a 128 bit integer value
FIRST_INTEGER_VALUETYPE = i1,
LAST_INTEGER_VALUETYPE = i128,
f16 = 7, // This is a 16 bit floating point value
f32 = 8, // This is a 32 bit floating point value
f64 = 9, // This is a 64 bit floating point value
f80 = 10, // This is an 80 bit floating point value
f128 = 11, // This is a 128 bit floating point value
ppcf128 = 12, // This is a PPC 128-bit floating point value
FIRST_FP_VALUETYPE = f16,
LAST_FP_VALUETYPE = ppcf128,
v2i1 = 13, // 2 x i1
v4i1 = 14, // 4 x i1
v8i1 = 15, // 8 x i1
v16i1 = 16, // 16 x i1
v32i1 = 17, // 32 x i1
v64i1 = 18, // 64 x i1
v1i8 = 19, // 1 x i8
v2i8 = 20, // 2 x i8
v4i8 = 21, // 4 x i8
v8i8 = 22, // 8 x i8
v16i8 = 23, // 16 x i8
v32i8 = 24, // 32 x i8
v64i8 = 25, // 64 x i8
v1i16 = 26, // 1 x i16
v2i16 = 27, // 2 x i16
v4i16 = 28, // 4 x i16
v8i16 = 29, // 8 x i16
v16i16 = 30, // 16 x i16
v32i16 = 31, // 32 x i16
v1i32 = 32, // 1 x i32
v2i32 = 33, // 2 x i32
v4i32 = 34, // 4 x i32
v8i32 = 35, // 8 x i32
v16i32 = 36, // 16 x i32
v1i64 = 37, // 1 x i64
v2i64 = 38, // 2 x i64
v4i64 = 39, // 4 x i64
v8i64 = 40, // 8 x i64
v16i64 = 41, // 16 x i64
v1i128 = 42, // 1 x i128
FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
LAST_INTEGER_VECTOR_VALUETYPE = v1i128,
v2f16 = 43, // 2 x f16
v4f16 = 44, // 4 x f16
v8f16 = 45, // 8 x f16
v1f32 = 46, // 1 x f32
v2f32 = 47, // 2 x f32
v4f32 = 48, // 4 x f32
v8f32 = 49, // 8 x f32
v16f32 = 50, // 16 x f32
v1f64 = 51, // 1 x f64
v2f64 = 52, // 2 x f64
v4f64 = 53, // 4 x f64
v8f64 = 54, // 8 x f64
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = v8f64,
FIRST_VECTOR_VALUETYPE = v2i1,
LAST_VECTOR_VALUETYPE = v8f64,
x86mmx = 55, // This is an X86 MMX value
Glue = 56, // This glues nodes together during pre-RA sched
isVoid = 57, // This has no value
Untyped = 58, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
FIRST_VALUETYPE = 0, // This is always the beginning of the list.
LAST_VALUETYPE = 59, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
// This value must be a multiple of 32.
MAX_ALLOWED_VALUETYPE = 64,
// Metadata - This is MDNode or MDString.
Metadata = 250,
// iPTRAny - An int value the size of the pointer of the current
// target to any address space. This must only be used internal to
// tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
iPTRAny = 251,
// vAny - A vector with any length and element size. This is used
// for intrinsics that have overloadings based on vector types.
// This is only for tblgen's consumption!
vAny = 252,
// fAny - Any floating-point or vector floating-point value. This is used
// for intrinsics that have overloadings based on floating-point types.
// This is only for tblgen's consumption!
fAny = 253,
// iAny - An integer or vector integer value of any bit width. This is
// used for intrinsics that have overloadings based on integer bit widths.
// This is only for tblgen's consumption!
iAny = 254,
// iPTR - An int value the size of the pointer of the current
// target. This should only be used internal to tblgen!
iPTR = 255,
// Any - Any type. This is used for intrinsics that have overloadings.
// This is only for tblgen's consumption!
Any = 256
};
SimpleValueType SimpleTy;
LLVM_CONSTEXPR MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {}
LLVM_CONSTEXPR MVT(SimpleValueType SVT) : SimpleTy(SVT) { }
bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
/// isValid - Return true if this is a valid simple valuetype.
bool isValid() const {
return (SimpleTy >= MVT::FIRST_VALUETYPE &&
SimpleTy < MVT::LAST_VALUETYPE);
}
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
}
/// isVector - Return true if this is a vector value type.
bool isVector() const {
return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
/// is16BitVector - Return true if this is a 16-bit vector type.
bool is16BitVector() const {
return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
SimpleTy == MVT::v16i1);
}
/// is32BitVector - Return true if this is a 32-bit vector type.
bool is32BitVector() const {
return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 ||
SimpleTy == MVT::v1i32 || SimpleTy == MVT::v2f16 ||
SimpleTy == MVT::v1f32);
}
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
SimpleTy == MVT::v4f16 || SimpleTy == MVT::v2f32 ||
SimpleTy == MVT::v1f64);
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
SimpleTy == MVT::v1i128 || SimpleTy == MVT::v8f16 ||
SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
}
/// is256BitVector - Return true if this is a 256-bit vector type.
bool is256BitVector() const {
return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
}
/// is512BitVector - Return true if this is a 512-bit vector type.
bool is512BitVector() const {
return (SimpleTy == MVT::v8f64 || SimpleTy == MVT::v16f32 ||
SimpleTy == MVT::v64i8 || SimpleTy == MVT::v32i16 ||
SimpleTy == MVT::v8i64 || SimpleTy == MVT::v16i32);
}
/// is1024BitVector - Return true if this is a 1024-bit vector type.
bool is1024BitVector() const {
return (SimpleTy == MVT::v16i64);
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
bool isOverloaded() const {
return (SimpleTy==MVT::Any ||
SimpleTy==MVT::iAny || SimpleTy==MVT::fAny ||
SimpleTy==MVT::vAny || SimpleTy==MVT::iPTRAny);
}
/// isPow2VectorType - Returns true if the vector's number of elements is a
/// power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorNumElements();
return !(NElts & (NElts - 1));
}
/// getPow2VectorType - Widens the length of the given vector MVT up to
/// the nearest power of 2 and returns that type.
MVT getPow2VectorType() const {
if (isPow2VectorType())
return *this;
unsigned NElts = getVectorNumElements();
unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
}
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return this.
MVT getScalarType() const {
return isVector() ? getVectorElementType() : *this;
}
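// For example (illustrative): MVT(MVT::v4f32).getScalarType() == MVT::f32,
// while MVT(MVT::i32).getScalarType() == MVT::i32.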
MVT getVectorElementType() const {
switch (SimpleTy) {
default:
llvm_unreachable("Not a vector MVT!");
case v2i1 :
case v4i1 :
case v8i1 :
case v16i1 :
case v32i1 :
case v64i1: return i1;
case v1i8 :
case v2i8 :
case v4i8 :
case v8i8 :
case v16i8:
case v32i8:
case v64i8: return i8;
case v1i16:
case v2i16:
case v4i16:
case v8i16:
case v16i16:
case v32i16: return i16;
case v1i32:
case v2i32:
case v4i32:
case v8i32:
case v16i32: return i32;
case v1i64:
case v2i64:
case v4i64:
case v8i64:
case v16i64: return i64;
case v1i128: return i128;
case v2f16:
case v4f16:
case v8f16: return f16;
case v1f32:
case v2f32:
case v4f32:
case v8f32:
case v16f32: return f32;
case v1f64:
case v2f64:
case v4f64:
case v8f64: return f64;
}
}
unsigned getVectorNumElements() const {
switch (SimpleTy) {
default:
llvm_unreachable("Not a vector MVT!");
case v32i1:
case v32i8:
case v32i16: return 32;
case v64i1:
case v64i8: return 64;
case v16i1:
case v16i8:
case v16i16:
case v16i32:
case v16i64:
case v16f32: return 16;
case v8i1 :
case v8i8 :
case v8i16:
case v8i32:
case v8i64:
case v8f16:
case v8f32:
case v8f64: return 8;
case v4i1:
case v4i8:
case v4i16:
case v4i32:
case v4i64:
case v4f16:
case v4f32:
case v4f64: return 4;
case v2i1:
case v2i8:
case v2i16:
case v2i32:
case v2i64:
case v2f16:
case v2f32:
case v2f64: return 2;
case v1i8:
case v1i16:
case v1i32:
case v1i64:
case v1i128:
case v1f32:
case v1f64: return 1;
}
}
unsigned getSizeInBits() const {
switch (SimpleTy) {
default:
llvm_unreachable("getSizeInBits called on extended MVT.");
case Other:
llvm_unreachable("Value type is non-standard value, Other.");
case iPTR:
llvm_unreachable("Value type size is target-dependent. Ask TLI.");
case iPTRAny:
case iAny:
case fAny:
case vAny:
case Any:
llvm_unreachable("Value type is overloaded.");
case Metadata:
llvm_unreachable("Value type is metadata.");
case i1 : return 1;
case v2i1: return 2;
case v4i1: return 4;
case i8 :
case v1i8:
case v8i1: return 8;
case i16 :
case f16:
case v16i1:
case v2i8:
case v1i16: return 16;
case f32 :
case i32 :
case v32i1:
case v4i8:
case v2i16:
case v2f16:
case v1f32:
case v1i32: return 32;
case x86mmx:
case f64 :
case i64 :
case v64i1:
case v8i8:
case v4i16:
case v2i32:
case v1i64:
case v4f16:
case v2f32:
case v1f64: return 64;
case f80 : return 80;
case f128:
case ppcf128:
case i128:
case v16i8:
case v8i16:
case v4i32:
case v2i64:
case v1i128:
case v8f16:
case v4f32:
case v2f64: return 128;
case v32i8:
case v16i16:
case v8i32:
case v4i64:
case v8f32:
case v4f64: return 256;
case v64i8:
case v32i16:
case v16i32:
case v8i64:
case v16f32:
case v8f64: return 512;
case v16i64:return 1024;
}
}
unsigned getScalarSizeInBits() const {
return getScalarType().getSizeInBits();
}
/// getStoreSize - Return the number of bytes overwritten by a store
/// of the specified value type.
unsigned getStoreSize() const {
return (getSizeInBits() + 7) / 8;
}
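// For example (illustrative): MVT(MVT::i1).getStoreSize() == 1 and
// MVT(MVT::v2i32).getStoreSize() == 8.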
/// getStoreSizeInBits - Return the number of bits overwritten by a store
/// of the specified value type.
unsigned getStoreSizeInBits() const {
return getStoreSize() * 8;
}
/// Return true if this has more bits than VT.
bool bitsGT(MVT VT) const {
return getSizeInBits() > VT.getSizeInBits();
}
/// Return true if this has no less bits than VT.
bool bitsGE(MVT VT) const {
return getSizeInBits() >= VT.getSizeInBits();
}
/// Return true if this has less bits than VT.
bool bitsLT(MVT VT) const {
return getSizeInBits() < VT.getSizeInBits();
}
/// Return true if this has no more bits than VT.
bool bitsLE(MVT VT) const {
return getSizeInBits() <= VT.getSizeInBits();
}
static MVT getFloatingPointVT(unsigned BitWidth) {
switch (BitWidth) {
default:
llvm_unreachable("Bad bit width!");
case 16:
return MVT::f16;
case 32:
return MVT::f32;
case 64:
return MVT::f64;
case 80:
return MVT::f80;
case 128:
return MVT::f128;
}
}
static MVT getIntegerVT(unsigned BitWidth) {
switch (BitWidth) {
default:
return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
case 1:
return MVT::i1;
case 8:
return MVT::i8;
case 16:
return MVT::i16;
case 32:
return MVT::i32;
case 64:
return MVT::i64;
case 128:
return MVT::i128;
}
}
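// For example (illustrative): MVT::getIntegerVT(32) yields MVT::i32, while
// an unsupported width such as 24 yields INVALID_SIMPLE_VALUE_TYPE.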
static MVT getVectorVT(MVT VT, unsigned NumElements) {
switch (VT.SimpleTy) {
default:
break;
case MVT::i1:
if (NumElements == 2) return MVT::v2i1;
if (NumElements == 4) return MVT::v4i1;
if (NumElements == 8) return MVT::v8i1;
if (NumElements == 16) return MVT::v16i1;
if (NumElements == 32) return MVT::v32i1;
if (NumElements == 64) return MVT::v64i1;
break;
case MVT::i8:
if (NumElements == 1) return MVT::v1i8;
if (NumElements == 2) return MVT::v2i8;
if (NumElements == 4) return MVT::v4i8;
if (NumElements == 8) return MVT::v8i8;
if (NumElements == 16) return MVT::v16i8;
if (NumElements == 32) return MVT::v32i8;
if (NumElements == 64) return MVT::v64i8;
break;
case MVT::i16:
if (NumElements == 1) return MVT::v1i16;
if (NumElements == 2) return MVT::v2i16;
if (NumElements == 4) return MVT::v4i16;
if (NumElements == 8) return MVT::v8i16;
if (NumElements == 16) return MVT::v16i16;
if (NumElements == 32) return MVT::v32i16;
break;
case MVT::i32:
if (NumElements == 1) return MVT::v1i32;
if (NumElements == 2) return MVT::v2i32;
if (NumElements == 4) return MVT::v4i32;
if (NumElements == 8) return MVT::v8i32;
if (NumElements == 16) return MVT::v16i32;
break;
case MVT::i64:
if (NumElements == 1) return MVT::v1i64;
if (NumElements == 2) return MVT::v2i64;
if (NumElements == 4) return MVT::v4i64;
if (NumElements == 8) return MVT::v8i64;
if (NumElements == 16) return MVT::v16i64;
break;
case MVT::i128:
if (NumElements == 1) return MVT::v1i128;
break;
case MVT::f16:
if (NumElements == 2) return MVT::v2f16;
if (NumElements == 4) return MVT::v4f16;
if (NumElements == 8) return MVT::v8f16;
break;
case MVT::f32:
if (NumElements == 1) return MVT::v1f32;
if (NumElements == 2) return MVT::v2f32;
if (NumElements == 4) return MVT::v4f32;
if (NumElements == 8) return MVT::v8f32;
if (NumElements == 16) return MVT::v16f32;
break;
case MVT::f64:
if (NumElements == 1) return MVT::v1f64;
if (NumElements == 2) return MVT::v2f64;
if (NumElements == 4) return MVT::v4f64;
if (NumElements == 8) return MVT::v8f64;
break;
}
return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
}
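// For example (illustrative): MVT::getVectorVT(MVT::f32, 4) yields
// MVT::v4f32, while an unsupported combination such as (MVT::f16, 16)
// yields INVALID_SIMPLE_VALUE_TYPE.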
/// Return the value type corresponding to the specified type. This returns
/// all pointers as iPTR. If HandleUnknown is true, unknown types are
/// returned as Other, otherwise they are invalid.
static MVT getVT(Type *Ty, bool HandleUnknown = false);
private:
/// A simple iterator over the MVT::SimpleValueType enum.
struct mvt_iterator {
SimpleValueType VT;
mvt_iterator(SimpleValueType VT) : VT(VT) {}
MVT operator*() const { return VT; }
bool operator!=(const mvt_iterator &LHS) const { return VT != LHS.VT; }
mvt_iterator& operator++() {
VT = (MVT::SimpleValueType)((int)VT + 1);
assert((int)VT <= MVT::MAX_ALLOWED_VALUETYPE &&
"MVT iterator overflowed.");
return *this;
}
};
/// A range of the MVT::SimpleValueType enum.
typedef iterator_range<mvt_iterator> mvt_range;
public:
/// SimpleValueType Iteration
/// @{
static mvt_range all_valuetypes() {
return mvt_range(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE);
}
static mvt_range integer_valuetypes() {
return mvt_range(MVT::FIRST_INTEGER_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_VALUETYPE + 1));
}
static mvt_range fp_valuetypes() {
return mvt_range(MVT::FIRST_FP_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_VALUETYPE + 1));
}
static mvt_range vector_valuetypes() {
return mvt_range(MVT::FIRST_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_VECTOR_VALUETYPE + 1));
}
static mvt_range integer_vector_valuetypes() {
return mvt_range(
MVT::FIRST_INTEGER_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_VECTOR_VALUETYPE + 1));
}
static mvt_range fp_vector_valuetypes() {
return mvt_range(
MVT::FIRST_FP_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
}
/// @}
};
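// Illustrative sketch (assumed usage, not part of the class itself): the
// ranges above allow iteration over the simple value types, e.g.
//
//   for (MVT VT : MVT::integer_valuetypes())
//     if (VT.getSizeInBits() <= 64)
//       /* consider VT as a candidate legal type */;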
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/VirtRegMap.h | //===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a virtual register map. This maps virtual registers to
// physical registers and virtual registers to stack slots. It is created and
// updated by a register allocator and then used by a machine code rewriter that
// adds spill code and rewrites virtual into physical register references.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_VIRTREGMAP_H
#define LLVM_CODEGEN_VIRTREGMAP_H
#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class MachineInstr;
class MachineFunction;
class MachineRegisterInfo;
class TargetInstrInfo;
class raw_ostream;
class SlotIndexes;
class VirtRegMap : public MachineFunctionPass {
public:
enum {
NO_PHYS_REG = 0,
NO_STACK_SLOT = (1L << 30)-1,
MAX_STACK_SLOT = (1L << 18)-1
};
private:
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineFunction *MF;
/// Virt2PhysMap - This is a virtual to physical register
/// mapping. Each virtual register is required to have an entry in
/// it; even spilled virtual registers (the register mapped to a
/// spilled register is the temporary used to load it from the
/// stack).
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
/// Virt2StackSlotMap - This is virtual register to stack slot
/// mapping. Each spilled virtual register has an entry in it
/// which corresponds to the stack slot this register is spilled
/// at.
IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
/// Virt2SplitMap - This is virtual register to split virtual register
/// mapping.
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
/// createSpillSlot - Allocate a spill slot for RC from MFI.
unsigned createSpillSlot(const TargetRegisterClass *RC);
VirtRegMap(const VirtRegMap&) = delete;
void operator=(const VirtRegMap&) = delete;
public:
static char ID;
VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) { }
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachineFunction &getMachineFunction() const {
assert(MF && "getMachineFunction called before runOnMachineFunction");
return *MF;
}
MachineRegisterInfo &getRegInfo() const { return *MRI; }
const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }
void grow();
/// @brief returns true if the specified virtual register is
/// mapped to a physical register
bool hasPhys(unsigned virtReg) const {
return getPhys(virtReg) != NO_PHYS_REG;
}
/// @brief returns the physical register mapped to the specified
/// virtual register
unsigned getPhys(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
return Virt2PhysMap[virtReg];
}
/// @brief creates a mapping for the specified virtual register to
/// the specified physical register
void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
TargetRegisterInfo::isPhysicalRegister(physReg));
assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
"attempt to assign physical register to already mapped "
"virtual register");
Virt2PhysMap[virtReg] = physReg;
}
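// Illustrative allocator-side sketch (VRM, VirtReg and PhysReg are assumed
// to exist; not part of this header):
//
//   if (!VRM->hasPhys(VirtReg))
//     VRM->assignVirt2Phys(VirtReg, PhysReg);
//   assert(VRM->getPhys(VirtReg) == PhysReg);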
/// @brief clears the specified virtual register's physical
/// register mapping
void clearVirt(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
"attempt to clear a not assigned virtual register");
Virt2PhysMap[virtReg] = NO_PHYS_REG;
}
/// @brief clears all virtual to physical register mappings
void clearAllVirt() {
Virt2PhysMap.clear();
grow();
}
/// @brief returns true if VirtReg is assigned to its preferred physreg.
bool hasPreferredPhys(unsigned VirtReg);
/// @brief returns true if VirtReg has a known preferred register.
/// This returns false if VirtReg has a preference that is a virtual
/// register that hasn't been assigned yet.
bool hasKnownPreference(unsigned VirtReg);
/// @brief records virtReg is a split live interval from SReg.
void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
Virt2SplitMap[virtReg] = SReg;
}
/// @brief returns the live interval virtReg is split from.
unsigned getPreSplitReg(unsigned virtReg) const {
return Virt2SplitMap[virtReg];
}
/// getOriginal - Return the original virtual register that VirtReg descends
/// from through splitting.
/// A register that was not created by splitting is its own original.
/// This operation is idempotent.
unsigned getOriginal(unsigned VirtReg) const {
unsigned Orig = getPreSplitReg(VirtReg);
return Orig ? Orig : VirtReg;
}
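// For example (illustrative): if %1 was created by splitting %0, then
// getOriginal(%1) == %0 and getOriginal(%0) == %0 (idempotent).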
/// @brief returns true if the specified virtual register is not
/// mapped to a stack slot or rematerialized.
bool isAssignedReg(unsigned virtReg) const {
if (getStackSlot(virtReg) == NO_STACK_SLOT)
return true;
// Split register can be assigned a physical register as well as a
// stack slot or remat id.
return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
}
/// @brief returns the stack slot mapped to the specified virtual
/// register
int getStackSlot(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
return Virt2StackSlotMap[virtReg];
}
/// @brief create a mapping for the specified virtual register to
/// the next available stack slot
int assignVirt2StackSlot(unsigned virtReg);
/// @brief create a mapping for the specified virtual register to
/// the specified stack slot
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
void print(raw_ostream &OS, const Module* M = nullptr) const override;
void dump() const;
};
inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
VRM.print(OS);
return OS;
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineBranchProbabilityInfo.h | //=- MachineBranchProbabilityInfo.h - Branch Probability Analysis -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities on machine basic blocks.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include <climits>
namespace llvm {
class MachineBranchProbabilityInfo : public ImmutablePass {
virtual void anchor();
// Default weight value. Used when we don't have information about the edge.
// TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
// the successors have a weight yet. But it doesn't make sense when providing
// weight to an edge that may have siblings with non-zero weights. This can
// be handled various ways, but it's probably fine for an edge with unknown
// weight to just "inherit" the non-zero weight of an adjacent successor.
static const uint32_t DEFAULT_WEIGHT = 16;
public:
static char ID;
MachineBranchProbabilityInfo() : ImmutablePass(ID) {
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeMachineBranchProbabilityInfoPass(Registry);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
// Return the edge weight. If we don't have any information about the edge,
// return DEFAULT_WEIGHT.
uint32_t getEdgeWeight(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
// Same thing, but using a const_succ_iterator from Src. This is faster when
// the iterator is already available.
uint32_t getEdgeWeight(const MachineBasicBlock *Src,
MachineBasicBlock::const_succ_iterator Dst) const;
// Get sum of the block successors' weights, potentially scaling them to fit
// within 32-bits. If scaling is required, sets Scale based on the necessary
// adjustment. Any edge weights used with the sum should be divided by Scale.
uint32_t getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const;
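// Illustrative use (sketch; MBPI, MBB and Succ are assumed): any weight
// combined with the sum must be divided by the same Scale, e.g.
//
//   uint32_t Scale;
//   uint32_t Sum = MBPI->getSumForBlock(MBB, Scale);
//   uint32_t W = MBPI->getEdgeWeight(MBB, Succ) / Scale;
//   // W out of Sum approximates this edge's share of the block's weight.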
// A 'Hot' edge is an edge whose probability is >= 80%.
bool isEdgeHot(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
// Return a hot successor for the block BB or null if there isn't one.
// NB: This routine's complexity is linear on the number of successors.
MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;
// Return a probability as a fraction between 0 (0% probability) and
// 1 (100% probability). The value is never equal to 0, and can be 1 only
// if the Src block has a single successor.
// NB: This routine's complexity is linear on the number of successors of
// Src. Querying sequentially for each successor's probability is a quadratic
// query pattern.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
// Print a value between 0 (0% probability) and 1 (100% probability).
// The value is never equal to 0, and can be 1 only if the Src block has a
// single successor.
raw_ostream &printEdgeProbability(raw_ostream &OS,
const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/AsmPrinter.h | //===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a class to be used as the base class for target specific
// asm writers. This class primarily handles common functionality used by
// all asm writers.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_ASMPRINTER_H
#define LLVM_CODEGEN_ASMPRINTER_H
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class AsmPrinterHandler;
class BlockAddress;
class ByteStreamer;
class GCStrategy;
class Constant;
class ConstantArray;
class DIE;
class DIEAbbrev;
class GCMetadataPrinter;
class GlobalValue;
class GlobalVariable;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineLocation;
class MachineLoopInfo;
class MachineLoop;
class MachineConstantPoolValue;
class MachineJumpTableInfo;
class MachineModuleInfo;
class MCAsmInfo;
class MCCFIInstruction;
class MCContext;
class MCExpr;
class MCInst;
class MCSection;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
class MCTargetOptions;
class MDNode;
class DwarfDebug;
class Mangler;
class TargetLoweringObjectFile;
class DataLayout;
class TargetMachine;
/// This class is intended to be used as a driving class for all asm writers.
class AsmPrinter : public MachineFunctionPass {
public:
/// Target machine description.
///
TargetMachine &TM;
/// Target Asm Printer information.
///
const MCAsmInfo *MAI;
/// This is the context for the output file that we are streaming. This owns
/// all of the global MC-related objects for the generated translation unit.
MCContext &OutContext;
/// This is the MCStreamer object for the file we are generating. This
/// contains the transient state for the current translation unit that we are
/// generating (such as the current section etc).
std::unique_ptr<MCStreamer> OutStreamer;
/// The current machine function.
const MachineFunction *MF;
/// This is a pointer to the current MachineModuleInfo.
MachineModuleInfo *MMI;
/// Name-mangler for global names.
///
Mangler *Mang;
/// The symbol for the current function. This is recalculated at the beginning
/// of each call to runOnMachineFunction().
///
MCSymbol *CurrentFnSym;
/// The symbol used to represent the start of the current function for the
/// purpose of calculating its size (e.g. using the .size directive). By
/// default, this is equal to CurrentFnSym.
MCSymbol *CurrentFnSymForSize;
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
/// their number of uses by other globals.
typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
private:
MCSymbol *CurrentFnBegin;
MCSymbol *CurrentFnEnd;
MCSymbol *CurExceptionSym;
// The garbage collection metadata printer table.
void *GCMetadataPrinters; // Really a DenseMap.
/// Emit comments in assembly output if this is true.
///
bool VerboseAsm;
static char ID;
/// If VerboseAsm is set, a pointer to the loop info for this function.
MachineLoopInfo *LI;
struct HandlerInfo {
AsmPrinterHandler *Handler;
const char *TimerName, *TimerGroupName;
HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
const char *TimerGroupName)
: Handler(Handler), TimerName(TimerName),
TimerGroupName(TimerGroupName) {}
};
/// A vector of all debug/EH info emitters we should use. This vector
/// maintains ownership of the emitters.
SmallVector<HandlerInfo, 1> Handlers;
/// If the target supports dwarf debug info, this pointer is non-null.
DwarfDebug *DD;
protected:
explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);
public:
~AsmPrinter() override;
DwarfDebug *getDwarfDebug() { return DD; }
DwarfDebug *getDwarfDebug() const { return DD; }
/// Return true if assembly output should contain comments.
///
bool isVerbose() const { return VerboseAsm; }
/// Return a unique ID for the current function.
///
unsigned getFunctionNumber() const;
MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
MCSymbol *getCurExceptionSym();
/// Return information about object file lowering.
const TargetLoweringObjectFile &getObjFileLowering() const;
/// Return information about data layout.
const DataLayout &getDataLayout() const;
/// Return information about subtarget.
const MCSubtargetInfo &getSubtargetInfo() const;
void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
/// Return the target triple string.
StringRef getTargetTriple() const;
/// Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
void getNameWithPrefix(SmallVectorImpl<char> &Name,
const GlobalValue *GV) const;
MCSymbol *getSymbol(const GlobalValue *GV) const;
//===------------------------------------------------------------------===//
// MachineFunctionPass Implementation.
//===------------------------------------------------------------------===//
/// Record analysis usage.
///
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// Set up the AsmPrinter when we are working on a new module. If your pass
/// overrides this, it must make sure to explicitly call this implementation.
bool doInitialization(Module &M) override;
/// Shut down the asmprinter. If you override this in your pass, you must make
/// sure to call it explicitly.
bool doFinalization(Module &M) override;
/// Emit the specified function out to the OutStreamer.
bool runOnMachineFunction(MachineFunction &MF) override {
SetupMachineFunction(MF);
EmitFunctionBody();
return false;
}
//===------------------------------------------------------------------===//
// Coarse grained IR lowering routines.
//===------------------------------------------------------------------===//
/// This should be called when a new MachineFunction is being processed from
/// runOnMachineFunction.
void SetupMachineFunction(MachineFunction &MF);
/// This method emits the body and trailer for a function.
void EmitFunctionBody();
void emitCFIInstruction(const MachineInstr &MI);
void emitFrameAlloc(const MachineInstr &MI);
enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
CFIMoveType needsCFIMoves();
bool needsSEHMoves();
/// Print to the current output stream assembly representations of the
/// constants in the constant pool MCP. This is used to print out constants
/// which have been "spilled to memory" by the code generator.
///
virtual void EmitConstantPool();
/// Print assembly representations of the jump tables used by the current
/// function to the current output stream.
///
void EmitJumpTableInfo();
/// Emit the specified global variable to the .s file.
virtual void EmitGlobalVariable(const GlobalVariable *GV);
/// Check to see if the specified global is a special global used by LLVM. If
/// so, emit it and return true, otherwise do nothing and return false.
bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
/// Emit an alignment directive to the specified power of two boundary. For
/// example, if you pass in 3 here, you will get an 8 byte alignment. If a
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
/// correctness.
///
void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
/// Lower the specified LLVM Constant to an MCExpr.
const MCExpr *lowerConstant(const Constant *CV);
/// \brief Print a general LLVM constant to the .s file.
void EmitGlobalConstant(const Constant *CV);
/// \brief Unnamed constant global variables solely containing a pointer to
/// another global variable act like a global variable "proxy", or GOT
/// equivalent, i.e., they are only used to hold the address of the latter. One
/// optimization is to replace accesses to these proxies by using the GOT
/// entry for the final global instead. Hence, we select GOT equivalent
/// candidates among all the module global variables, avoid emitting them
/// unnecessarily and finally replace references to them by pc relative
/// accesses to GOT entries.
void computeGlobalGOTEquivs(Module &M);
/// \brief Constant expressions using GOT equivalent globals may not be
/// eligible for PC relative GOT entry conversion, in such cases we need to
/// emit the proxies we previously omitted in EmitGlobalVariable.
void emitGlobalGOTEquivs();
//===------------------------------------------------------------------===//
// Overridable Hooks
//===------------------------------------------------------------------===//
// Targets can, or in the case of EmitInstruction, must implement these to
// customize output.
/// This virtual method can be overridden by targets that want to emit
/// something at the start of their file.
virtual void EmitStartOfAsmFile(Module &) {}
/// This virtual method can be overridden by targets that want to emit
/// something at the end of their file.
virtual void EmitEndOfAsmFile(Module &) {}
/// Targets can override this to emit stuff before the first basic block in
/// the function.
virtual void EmitFunctionBodyStart() {}
/// Targets can override this to emit stuff after the last basic block in the
/// function.
virtual void EmitFunctionBodyEnd() {}
/// Targets can override this to emit stuff at the start of a basic block.
/// By default, this method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing it
/// if appropriate.
virtual void EmitBasicBlockStart(const MachineBasicBlock &MBB) const;
/// Targets can override this to emit stuff at the end of a basic block.
virtual void EmitBasicBlockEnd(const MachineBasicBlock &MBB) {}
/// Targets should implement this to emit instructions.
virtual void EmitInstruction(const MachineInstr *) {
llvm_unreachable("EmitInstruction not implemented");
}
/// Return the symbol for the specified constant pool entry.
virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
virtual void EmitFunctionEntryLabel();
virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
/// Targets can override this to change how global constants that are part of
/// a C++ static/global constructor list are emitted.
virtual void EmitXXStructor(const Constant *CV) { EmitGlobalConstant(CV); }
/// Return true if the basic block has exactly one predecessor and the control
/// transfer mechanism between the predecessor and this block is a
/// fall-through.
virtual bool
isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
/// Targets can override this to customize the output of IMPLICIT_DEF
/// instructions in verbose mode.
virtual void emitImplicitDef(const MachineInstr *MI) const;
//===------------------------------------------------------------------===//
// Symbol Lowering Routines.
//===------------------------------------------------------------------===//
public:
MCSymbol *createTempSymbol(const Twine &Name) const;
/// Return the MCSymbol for a private symbol with global value name as its
/// base, with the specified suffix.
MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
StringRef Suffix) const;
/// Return the MCSymbol for the specified ExternalSymbol.
MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
/// Return the symbol for the specified jump table entry.
MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
/// Return the symbol for the specified jump table .set
/// FIXME: privatize to AsmPrinter.
MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
/// Return the MCSymbol used to satisfy BlockAddress uses of the specified
/// basic block.
MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
//===------------------------------------------------------------------===//
// Emission Helper Routines.
//===------------------------------------------------------------------===//
public:
/// This is just a convenient handler for printing offsets.
void printOffset(int64_t Offset, raw_ostream &OS) const;
/// Emit a byte directive and value.
///
void EmitInt8(int Value) const;
/// Emit a short directive and value.
///
void EmitInt16(int Value) const;
/// Emit a long directive and value.
///
void EmitInt32(int Value) const;
/// Emit something like ".long Hi-Lo" where the size in bytes of the directive
/// is specified by Size and Hi/Lo specify the labels. This implicitly uses
/// .set if it is available.
void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Size) const;
/// Emit something like ".long Label+Offset" where the size in bytes of the
/// directive is specified by Size and Label specifies the label. This
/// implicitly uses .set if it is available.
void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
unsigned Size, bool IsSectionRelative = false) const;
/// Emit something like ".long Label" where the size in bytes of the directive
/// is specified by Size and Label specifies the label.
void EmitLabelReference(const MCSymbol *Label, unsigned Size,
bool IsSectionRelative = false) const {
EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
}
//===------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===------------------------------------------------------------------===//
/// Emit the specified signed leb128 value.
void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
/// Emit the specified unsigned leb128 value.
void EmitULEB128(uint64_t Value, const char *Desc = nullptr,
unsigned PadTo = 0) const;
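// For example (illustrative): EmitULEB128(300) emits the bytes 0xAC 0x02,
// since LEB128 stores 7 bits per byte, least-significant group first, with
// the high bit of each byte marking continuation.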
/// Emit a .byte 42 directive for a DW_CFA_xxx value.
void EmitCFAByte(unsigned Val) const;
/// Emit a .byte 42 directive that corresponds to an encoding. If verbose
/// assembly output is enabled, we output comments describing the encoding.
/// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
/// Return the size of the encoding in bytes.
unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
/// Emit reference to a ttype global with a specified encoding.
void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
/// Emit a reference to a symbol for use in dwarf. Different object formats
/// represent this in different ways. Some use a relocation others encode
/// the label offset in its section.
void emitDwarfSymbolReference(const MCSymbol *Label,
bool ForceOffset = false) const;
/// Emit the 4-byte offset of a string from the start of its section.
///
/// When possible, emit a DwarfStringPool section offset without any
/// relocations, and without using the symbol. Otherwise, defers to \a
/// emitDwarfSymbolReference().
void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const;
/// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
virtual unsigned getISAEncoding() { return 0; }
/// EmitDwarfRegOp - Emit a dwarf register operation.
virtual void EmitDwarfRegOp(ByteStreamer &BS,
const MachineLocation &MLoc) const;
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
/// \brief Emit frame instruction to describe the layout of the frame.
void emitCFIInstruction(const MCCFIInstruction &Inst) const;
/// \brief Emit Dwarf abbreviation table.
void emitDwarfAbbrevs(const std::vector<DIEAbbrev *>& Abbrevs) const;
/// \brief Recursively emit Dwarf DIE tree.
void emitDwarfDIE(const DIE &Die) const;
//===------------------------------------------------------------------===//
// Inline Asm Support
//===------------------------------------------------------------------===//
public:
// These are hooks that targets can override to implement inline asm
// support. These should probably be moved out of AsmPrinter someday.
/// Print information related to the specified machine instr that is
/// independent of the operand, and may be independent of the instr itself.
/// This can be useful for portably encoding the comment character or other
/// bits of target-specific knowledge into the asmstrings. The syntax used is
/// ${:comment}. Targets can override this to add support for their own
/// strange codes.
virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const;
/// Print the specified operand of MI, an INLINEASM instruction, using the
/// specified assembler variant. Targets should override this to format as
/// appropriate. This method can return true if the operand is erroneous.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &OS);
/// Print the specified operand of MI, an INLINEASM instruction, using the
/// specified assembler variant as an address. Targets should override this to
/// format as appropriate. This method can return true if the operand is
/// erroneous.
virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &OS);
/// Let the target do anything it needs to do before emitting inlineasm.
/// \p StartInfo - the subtarget info before parsing inline asm
virtual void emitInlineAsmStart() const;
/// Let the target do anything it needs to do after emitting inlineasm.
/// This callback can be used to restore the original mode in case the
/// inlineasm contains directives to switch modes.
/// \p StartInfo - the original subtarget info before inline asm
/// \p EndInfo - the final subtarget info after parsing the inline asm,
/// or NULL if the value is unknown.
virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
const MCSubtargetInfo *EndInfo) const;
private:
/// Private state for PrintSpecial()
// Assign a unique ID to this machine instruction.
mutable const MachineInstr *LastMI;
mutable unsigned LastFn;
mutable unsigned Counter;
/// This method emits the header for the current function.
virtual void EmitFunctionHeader();
/// Emit a blob of inline asm to the output streamer.
void
EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
const MCTargetOptions &MCOptions,
const MDNode *LocMDNode = nullptr,
InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
/// This method formats and emits the specified machine instruction that is an
/// inline asm.
void EmitInlineAsm(const MachineInstr *MI) const;
//===------------------------------------------------------------------===//
// Internal Implementation Details
//===------------------------------------------------------------------===//
/// This emits visibility information about the symbol, if this is supported
/// by the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
bool IsDefinition = true) const;
void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid) const;
void EmitLLVMUsedList(const ConstantArray *InitList);
/// Emit llvm.ident metadata in an '.ident' directive.
void EmitModuleIdents(Module &M);
void EmitXXStructorList(const Constant *List, bool isCtor);
GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/SlotIndexes.h | //===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements SlotIndex and related classes. The purpose of SlotIndex
// is to describe a position at which a register can become live, or cease to
// be live.
//
// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
// is held by LiveIntervals and provides the real numbering. This allows
// LiveIntervals to perform largely transparent renumbering.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
#define LLVM_CODEGEN_SLOTINDEXES_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Support/Allocator.h"
namespace llvm {
/// This class represents an entry in the slot index list held in the
/// SlotIndexes pass. It should not be used directly. See the
/// SlotIndex & SlotIndexes classes for the public interface to this
/// information.
class IndexListEntry : public ilist_node<IndexListEntry> {
MachineInstr *mi;
unsigned index;
public:
IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}
MachineInstr* getInstr() const { return mi; }
void setInstr(MachineInstr *mi) {
this->mi = mi;
}
unsigned getIndex() const { return index; }
void setIndex(unsigned index) {
this->index = index;
}
#ifdef EXPENSIVE_CHECKS
// When EXPENSIVE_CHECKS is defined, "erased" index list entries will
// actually be moved to a "graveyard" list, and have their pointers
// poisoned, so that dangling SlotIndex access can be reliably detected.
void setPoison() {
intptr_t tmp = reinterpret_cast<intptr_t>(mi);
assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
tmp |= 0x1;
mi = reinterpret_cast<MachineInstr*>(tmp);
}
bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
#endif // EXPENSIVE_CHECKS
};
template <>
struct ilist_traits<IndexListEntry> : public ilist_default_traits<IndexListEntry> {
private:
mutable ilist_half_node<IndexListEntry> Sentinel;
public:
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
IndexListEntry *
createSentinel() const {
return static_cast<IndexListEntry*>(&Sentinel);
}
void destroySentinel(IndexListEntry *) const {}
IndexListEntry *provideInitialHead() const { return createSentinel(); }
IndexListEntry *ensureHead(IndexListEntry*) const { return createSentinel(); }
static void noteHead(IndexListEntry*, IndexListEntry*) {}
void deleteNode(IndexListEntry *N) {}
private:
void createNode(const IndexListEntry &);
};
/// SlotIndex - An opaque wrapper around machine indexes.
class SlotIndex {
friend class SlotIndexes;
enum Slot {
/// Basic block boundary. Used for live ranges entering and leaving a
/// block without being live in the layout neighbor. Also used as the
/// def slot of PHI-defs.
Slot_Block,
/// Early-clobber register use/def slot. A live range defined at
/// Slot_EarlyClobber interferes with normal live ranges killed at
/// Slot_Register. Also used as the kill slot for live ranges tied to an
/// early-clobber def.
Slot_EarlyClobber,
/// Normal register use/def slot. Normal instructions kill and define
/// register live ranges at this slot.
Slot_Register,
/// Dead def kill point. Kill slot for a live range that is defined by
/// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
/// used anywhere.
Slot_Dead,
Slot_Count
};
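// Within one instruction's index the slots are ordered Slot_Block <
// Slot_EarlyClobber < Slot_Register < Slot_Dead; for example, a def at the
// early-clobber slot of an instruction overlaps uses killed at that same
// instruction's register slot.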
PointerIntPair<IndexListEntry*, 2, unsigned> lie;
SlotIndex(IndexListEntry *entry, unsigned slot)
: lie(entry, slot) {}
IndexListEntry* listEntry() const {
assert(isValid() && "Attempt to compare reserved index.");
#ifdef EXPENSIVE_CHECKS
assert(!lie.getPointer()->isPoisoned() &&
"Attempt to access deleted list-entry.");
#endif // EXPENSIVE_CHECKS
return lie.getPointer();
}
unsigned getIndex() const {
return listEntry()->getIndex() | getSlot();
}
/// Returns the slot for this SlotIndex.
Slot getSlot() const {
return static_cast<Slot>(lie.getInt());
}
public:
enum {
/// The default distance between instructions as returned by distance().
/// This may vary as instructions are inserted and removed.
InstrDist = 4 * Slot_Count
};
/// Construct an invalid index.
SlotIndex() : lie(nullptr, 0) {}
// Construct a new slot index from the given one, and set the slot.
SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
assert(lie.getPointer() != nullptr &&
"Attempt to construct index with 0 pointer.");
}
/// Returns true if this is a valid index. Invalid indices do
/// not point into an index table, and cannot be compared.
bool isValid() const {
return lie.getPointer();
}
/// Return true for a valid index.
explicit operator bool() const { return isValid(); }
/// Print this index to the given raw_ostream.
void print(raw_ostream &os) const;
/// Dump this index to stderr.
void dump() const;
/// Compare two SlotIndex objects for equality.
bool operator==(SlotIndex other) const {
return lie == other.lie;
}
/// Compare two SlotIndex objects for inequality.
bool operator!=(SlotIndex other) const {
return lie != other.lie;
}
/// Compare two SlotIndex objects. Return true if the first index
/// is strictly lower than the second.
bool operator<(SlotIndex other) const {
return getIndex() < other.getIndex();
}
/// Compare two SlotIndex objects. Return true if the first index
/// is lower than, or equal to, the second.
bool operator<=(SlotIndex other) const {
return getIndex() <= other.getIndex();
}
/// Compare two SlotIndex objects. Return true if the first index
/// is greater than the second.
bool operator>(SlotIndex other) const {
return getIndex() > other.getIndex();
}
/// Compare two SlotIndex objects. Return true if the first index
/// is greater than, or equal to, the second.
bool operator>=(SlotIndex other) const {
return getIndex() >= other.getIndex();
}
/// isSameInstr - Return true if A and B refer to the same instruction.
static bool isSameInstr(SlotIndex A, SlotIndex B) {
return A.lie.getPointer() == B.lie.getPointer();
}
/// isEarlierInstr - Return true if A refers to an instruction earlier than
/// B. This is equivalent to A < B && !isSameInstr(A, B).
static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
return A.listEntry()->getIndex() < B.listEntry()->getIndex();
}
/// Return the distance from this index to the given one.
int distance(SlotIndex other) const {
return other.getIndex() - getIndex();
}
/// Return the scaled distance from this index to the given one, where all
/// slots on the same instruction have zero distance.
int getInstrDistance(SlotIndex other) const {
return (other.listEntry()->getIndex() - listEntry()->getIndex())
/ Slot_Count;
}
/// isBlock - Returns true if this is a block boundary slot.
bool isBlock() const { return getSlot() == Slot_Block; }
/// isEarlyClobber - Returns true if this is an early-clobber slot.
bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
/// isRegister - Returns true if this is a normal register use/def slot.
/// Note that early-clobber slots may also be used for uses and defs.
bool isRegister() const { return getSlot() == Slot_Register; }
/// isDead - Returns true if this is a dead def kill slot.
bool isDead() const { return getSlot() == Slot_Dead; }
/// Returns the base index associated with this index. The base index
/// is the one associated with the Slot_Block slot for the instruction
/// pointed to by this index.
SlotIndex getBaseIndex() const {
return SlotIndex(listEntry(), Slot_Block);
}
/// Returns the boundary index associated with this index. The boundary
/// index is the one associated with the Slot_Block slot for the instruction
/// pointed to by this index.
SlotIndex getBoundaryIndex() const {
return SlotIndex(listEntry(), Slot_Dead);
}
/// Returns the register use/def slot in the current instruction for a
/// normal or early-clobber def.
SlotIndex getRegSlot(bool EC = false) const {
return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
}
/// Returns the dead def kill slot for the current instruction.
SlotIndex getDeadSlot() const {
return SlotIndex(listEntry(), Slot_Dead);
}
/// Returns the next slot in the index list. This could be either the
/// next slot for the instruction pointed to by this index or, if this
/// index is a Slot_Dead, the first slot for the next instruction.
/// WARNING: This method is considerably more expensive than the methods
/// that return specific slots (getRegSlot(), etc.). If you can, please
/// use one of those methods.
SlotIndex getNextSlot() const {
Slot s = getSlot();
if (s == Slot_Dead) {
return SlotIndex(listEntry()->getNextNode(), Slot_Block);
}
return SlotIndex(listEntry(), s + 1);
}
/// Returns the next index. This is the index corresponding to this
/// index's slot, but for the next instruction.
SlotIndex getNextIndex() const {
return SlotIndex(listEntry()->getNextNode(), getSlot());
}
/// Returns the previous slot in the index list. This could be either the
/// previous slot for the instruction pointed to by this index or, if this
/// index is a Slot_Block, the last slot for the previous instruction.
/// WARNING: This method is considerably more expensive than the methods
/// that return specific slots (getRegSlot(), etc.). If you can, please
/// use one of those methods.
SlotIndex getPrevSlot() const {
Slot s = getSlot();
if (s == Slot_Block) {
return SlotIndex(listEntry()->getPrevNode(), Slot_Dead);
}
return SlotIndex(listEntry(), s - 1);
}
/// Returns the previous index. This is the index corresponding to this
/// index's slot, but for the previous instruction.
SlotIndex getPrevIndex() const {
return SlotIndex(listEntry()->getPrevNode(), getSlot());
}
};
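// Illustrative sketch (not authoritative): slots within one instruction
// compare in the order Slot_Block < Slot_EarlyClobber < Slot_Register <
// Slot_Dead, so for any valid SlotIndex Idx:
//
//   SlotIndex B = Idx.getBaseIndex();   // Slot_Block of the instruction
//   SlotIndex R = Idx.getRegSlot();     // normal use/def slot
//   SlotIndex D = Idx.getDeadSlot();    // dead-def kill slot
//   assert(B <= R && R <= D && SlotIndex::isSameInstr(B, D));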
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
li.print(os);
return os;
}
typedef std::pair<SlotIndex, MachineBasicBlock*> IdxMBBPair;
inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
return V < IM.first;
}
inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
return IM.first < V;
}
struct Idx2MBBCompare {
bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
return LHS.first < RHS.first;
}
};
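// Illustrative note: the asymmetric operator< overloads above let
// std::lower_bound search a sorted vector of IdxMBBPairs directly with a
// SlotIndex key ('Map' and 'Idx' below are hypothetical):
//
//   SmallVectorImpl<IdxMBBPair>::const_iterator I =
//       std::lower_bound(Map.begin(), Map.end(), Idx);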
/// SlotIndexes pass.
///
/// This pass assigns indexes to each instruction.
class SlotIndexes : public MachineFunctionPass {
private:
typedef ilist<IndexListEntry> IndexList;
IndexList indexList;
#ifdef EXPENSIVE_CHECKS
IndexList graveyardList;
#endif // EXPENSIVE_CHECKS
MachineFunction *mf;
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
Mi2IndexMap mi2iMap;
/// MBBRanges - Map MBB number to (start, stop) indexes.
SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;
/// Idx2MBBMap - Sorted list of pairs of index of first instruction
/// and MBB id.
SmallVector<IdxMBBPair, 8> idx2MBBMap;
// IndexListEntry allocator.
BumpPtrAllocator ileAllocator;
IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
IndexListEntry *entry =
static_cast<IndexListEntry*>(
ileAllocator.Allocate(sizeof(IndexListEntry),
alignOf<IndexListEntry>()));
new (entry) IndexListEntry(mi, index);
return entry;
}
/// Renumber locally after inserting curItr.
void renumberIndexes(IndexList::iterator curItr);
public:
static char ID;
SlotIndexes() : MachineFunctionPass(ID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &au) const override;
void releaseMemory() override;
bool runOnMachineFunction(MachineFunction &fn) override;
/// Dump the indexes.
void dump() const;
/// Renumber the index list, providing space for new instructions.
void renumberIndexes();
/// Repair indexes after adding and removing instructions.
void repairIndexesInRange(MachineBasicBlock *MBB,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End);
/// Returns the zero index for this analysis.
SlotIndex getZeroIndex() {
assert(indexList.front().getIndex() == 0 && "First index is not 0?");
return SlotIndex(&indexList.front(), 0);
}
/// Returns the base index of the last slot in this analysis.
SlotIndex getLastIndex() {
return SlotIndex(&indexList.back(), 0);
}
/// Returns true if the given machine instr is mapped to an index,
/// otherwise returns false.
bool hasIndex(const MachineInstr *instr) const {
return mi2iMap.count(instr);
}
/// Returns the base index for the given instruction.
SlotIndex getInstructionIndex(const MachineInstr *MI) const {
// Instructions inside a bundle have the same number as the bundle itself.
Mi2IndexMap::const_iterator itr = mi2iMap.find(getBundleStart(MI));
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
}
/// Returns the instruction for the given index, or null if the given
/// index has no instruction associated with it.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return index.isValid() ? index.listEntry()->getInstr() : nullptr;
}
/// Returns the next non-null index, if one exists.
/// Otherwise returns getLastIndex().
SlotIndex getNextNonNullIndex(SlotIndex Index) {
IndexList::iterator I = Index.listEntry();
IndexList::iterator E = indexList.end();
while (++I != E)
if (I->getInstr())
return SlotIndex(I, Index.getSlot());
// We reached the end of the function.
return getLastIndex();
}
/// getIndexBefore - Returns the index of the last indexed instruction
/// before MI, or the start index of its basic block.
/// MI is not required to have an index.
SlotIndex getIndexBefore(const MachineInstr *MI) const {
const MachineBasicBlock *MBB = MI->getParent();
assert(MBB && "MI must be inserted inna basic block");
MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
for (;;) {
if (I == B)
return getMBBStartIdx(MBB);
--I;
Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
if (MapItr != mi2iMap.end())
return MapItr->second;
}
}
/// getIndexAfter - Returns the index of the first indexed instruction
/// after MI, or the end index of its basic block.
/// MI is not required to have an index.
SlotIndex getIndexAfter(const MachineInstr *MI) const {
const MachineBasicBlock *MBB = MI->getParent();
assert(MBB && "MI must be inserted inna basic block");
MachineBasicBlock::const_iterator I = MI, E = MBB->end();
for (;;) {
++I;
if (I == E)
return getMBBEndIdx(MBB);
Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
if (MapItr != mi2iMap.end())
return MapItr->second;
}
}
/// Return the (start,end) range of the given basic block number.
const std::pair<SlotIndex, SlotIndex> &
getMBBRange(unsigned Num) const {
return MBBRanges[Num];
}
/// Return the (start,end) range of the given basic block.
const std::pair<SlotIndex, SlotIndex> &
getMBBRange(const MachineBasicBlock *MBB) const {
return getMBBRange(MBB->getNumber());
}
/// Returns the first index in the given basic block number.
SlotIndex getMBBStartIdx(unsigned Num) const {
return getMBBRange(Num).first;
}
/// Returns the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
return getMBBRange(mbb).first;
}
/// Returns the last index in the given basic block number.
SlotIndex getMBBEndIdx(unsigned Num) const {
return getMBBRange(Num).second;
}
/// Returns the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
return getMBBRange(mbb).second;
}
/// Returns the basic block which the given index falls in.
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
if (MachineInstr *MI = getInstructionFromIndex(index))
return MI->getParent();
SmallVectorImpl<IdxMBBPair>::const_iterator I =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), index);
// Take the pair containing the index
SmallVectorImpl<IdxMBBPair>::const_iterator J =
((I != idx2MBBMap.end() && I->first > index) ||
(I == idx2MBBMap.end() && idx2MBBMap.size()>0)) ? (I-1): I;
assert(J != idx2MBBMap.end() && J->first <= index &&
index < getMBBEndIdx(J->second) &&
"index does not correspond to an MBB");
return J->second;
}
bool findLiveInMBBs(SlotIndex start, SlotIndex end,
SmallVectorImpl<MachineBasicBlock*> &mbbs) const {
SmallVectorImpl<IdxMBBPair>::const_iterator itr =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
bool resVal = false;
while (itr != idx2MBBMap.end()) {
if (itr->first >= end)
break;
mbbs.push_back(itr->second);
resVal = true;
++itr;
}
return resVal;
}
/// Returns the MBB covering the given range, or null if the range covers
/// more than one basic block.
MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
assert(start < end && "Backwards ranges not allowed.");
SmallVectorImpl<IdxMBBPair>::const_iterator itr =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
if (itr == idx2MBBMap.end()) {
itr = std::prev(itr);
return itr->second;
}
// Check that we don't cross the boundary into this block.
if (itr->first < end)
return nullptr;
itr = std::prev(itr);
if (itr->first <= start)
return itr->second;
return nullptr;
}
/// Insert the given machine instruction into the mapping. Returns the
/// assigned index.
/// If Late is set and there are null indexes between mi's neighboring
/// instructions, create the new index after the null indexes instead of
/// before them.
SlotIndex insertMachineInstrInMaps(MachineInstr *mi, bool Late = false) {
assert(!mi->isInsideBundle() &&
"Instructions inside bundles should use bundle start's slot.");
assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed.");
// Numbering DBG_VALUE instructions could cause code generation to be
// affected by debug information.
assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
assert(mi->getParent() != nullptr && "Instr must be added to function.");
// Get the entries where mi should be inserted.
IndexList::iterator prevItr, nextItr;
if (Late) {
// Insert mi's index immediately before the following instruction.
nextItr = getIndexAfter(mi).listEntry();
prevItr = std::prev(nextItr);
} else {
// Insert mi's index immediately after the preceding instruction.
prevItr = getIndexBefore(mi).listEntry();
nextItr = std::next(prevItr);
}
// Get a number for the new instr, or 0 if there's no room currently.
// In the latter case we'll force a renumber later.
unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
unsigned newNumber = prevItr->getIndex() + dist;
// Insert a new list entry for mi.
IndexList::iterator newItr =
indexList.insert(nextItr, createEntry(mi, newNumber));
// Renumber locally if we need to.
if (dist == 0)
renumberIndexes(newItr);
SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
mi2iMap.insert(std::make_pair(mi, newIndex));
return newIndex;
}
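// Worked example (illustrative, hypothetical numbers): if the neighboring
// entries are numbered 16 and 32, dist = ((32 - 16) / 2) & ~3u = 8 and the
// new instruction is numbered 24, leaving room on both sides. If the
// neighbors are only 4 apart, dist is 0 and renumberIndexes(newItr) reopens
// the gap locally.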
/// Remove the given machine instruction from the mapping.
void removeMachineInstrFromMaps(MachineInstr *mi) {
// remove index -> MachineInstr and
// MachineInstr -> index mappings
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
if (mi2iItr != mi2iMap.end()) {
IndexListEntry *miEntry(mi2iItr->second.listEntry());
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
// FIXME: Eventually we want to actually delete these indexes.
miEntry->setInstr(nullptr);
mi2iMap.erase(mi2iItr);
}
}
/// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
/// maps used by register allocator.
void replaceMachineInstrInMaps(MachineInstr *mi, MachineInstr *newMI) {
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
if (mi2iItr == mi2iMap.end())
return;
SlotIndex replaceBaseIndex = mi2iItr->second;
IndexListEntry *miEntry(replaceBaseIndex.listEntry());
assert(miEntry->getInstr() == mi &&
"Mismatched instruction in index tables.");
miEntry->setInstr(newMI);
mi2iMap.erase(mi2iItr);
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
}
/// Add the given MachineBasicBlock into the maps.
void insertMBBInMaps(MachineBasicBlock *mbb) {
MachineFunction::iterator nextMBB =
std::next(MachineFunction::iterator(mbb));
IndexListEntry *startEntry = nullptr;
IndexListEntry *endEntry = nullptr;
IndexList::iterator newItr;
if (nextMBB == mbb->getParent()->end()) {
startEntry = &indexList.back();
endEntry = createEntry(nullptr, 0);
newItr = indexList.insertAfter(startEntry, endEntry);
} else {
startEntry = createEntry(nullptr, 0);
endEntry = getMBBStartIdx(nextMBB).listEntry();
newItr = indexList.insert(endEntry, startEntry);
}
SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
MachineFunction::iterator prevMBB(mbb);
assert(prevMBB != mbb->getParent()->end() &&
"Can't insert a new block at the beginning of a function.");
--prevMBB;
MBBRanges[prevMBB->getNumber()].second = startIdx;
assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
"Blocks must be added in order");
MBBRanges.push_back(std::make_pair(startIdx, endIdx));
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
renumberIndexes(newItr);
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
}
/// \brief Free the resources that were required to maintain a SlotIndex.
///
/// Once an index is no longer needed (for instance because the instruction
/// at that index has been moved), the resources required to maintain the
/// index can be relinquished to reduce memory use and improve renumbering
/// performance. Any remaining SlotIndex objects that point to the same
/// index are left 'dangling' (much the same as a dangling pointer to a
/// freed object) and should not be accessed, except to destruct them.
///
/// Like dangling pointers, access to dangling SlotIndexes can cause
/// painful-to-track-down bugs, especially if the memory for the index
/// previously pointed to has been re-used. To detect dangling SlotIndex
/// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
/// be retained in a graveyard instead of being freed. Operations on indexes
/// in the graveyard will trigger an assertion.
void eraseIndex(SlotIndex index) {
IndexListEntry *entry = index.listEntry();
#ifdef EXPENSIVE_CHECKS
indexList.remove(entry);
graveyardList.push_back(entry);
entry->setPoison();
#else
indexList.erase(entry);
#endif
}
};
// Specialize IntervalMapInfo for half-open slot index intervals.
template <>
struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
};
}
#endif // LLVM_CODEGEN_SLOTINDEXES_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/DFAPacketizer.h | //=- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This class implements a deterministic finite automaton (DFA) based
// packetizing mechanism for VLIW architectures. It provides APIs to
// determine whether there exists a legal mapping of instructions to
// functional unit assignments in a packet. The DFA is auto-generated from
// the target's Schedule.td file.
//
// A DFA consists of 3 major elements: states, inputs, and transitions. For
// the packetizing mechanism, the input is the set of instruction classes for
// a target. The state models all possible combinations of functional unit
// consumption for a given set of instructions in a packet. A transition
// models the addition of an instruction to a packet. In the DFA constructed
// by this class, if an instruction can be added to a packet, then a valid
// transition exists from the corresponding state. Invalid transitions
// indicate that the instruction cannot be added to the current packet.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
#define LLVM_CODEGEN_DFAPACKETIZER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include <map>
namespace llvm {
class MCInstrDesc;
class MachineInstr;
class MachineLoopInfo;
class MachineDominatorTree;
class InstrItineraryData;
class DefaultVLIWScheduler;
class SUnit;
class DFAPacketizer {
private:
typedef std::pair<unsigned, unsigned> UnsignPair;
const InstrItineraryData *InstrItins;
int CurrentState;
const int (*DFAStateInputTable)[2];
const unsigned *DFAStateEntryTable;
// CachedTable is a map from <FromState, Input> to ToState.
DenseMap<UnsignPair, unsigned> CachedTable;
// ReadTable - Read the DFA transition table and update CachedTable.
void ReadTable(unsigned int state);
public:
DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
const unsigned *SET);
// Reset the current state to make all resources available.
void clearResources() {
CurrentState = 0;
}
// canReserveResources - Check if the resources occupied by a MCInstrDesc
// are available in the current state.
bool canReserveResources(const llvm::MCInstrDesc *MID);
// reserveResources - Reserve the resources occupied by a MCInstrDesc and
// change the current state to reflect that change.
void reserveResources(const llvm::MCInstrDesc *MID);
// canReserveResources - Check if the resources occupied by a machine
// instruction are available in the current state.
bool canReserveResources(llvm::MachineInstr *MI);
// reserveResources - Reserve the resources occupied by a machine
// instruction and change the current state to reflect that change.
void reserveResources(llvm::MachineInstr *MI);
const InstrItineraryData *getInstrItins() const { return InstrItins; }
};
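// Typical use (a minimal sketch; how the packetizer is obtained and the
// 'Insts' list are assumptions, not part of this header):
//
//   DFAPacketizer *P = ...; // e.g. from the target's scheduling hooks
//   P->clearResources();
//   for (MachineInstr *MI : Insts) {
//     if (!P->canReserveResources(MI))
//       break;                 // no legal FU assignment; start a new packet
//     P->reserveResources(MI); // commit MI and advance the DFA state
//   }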
// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
// packetizer works on machine basic blocks. For each instruction I in BB, the
// packetizer consults the DFA to see if machine resources are available to
// execute I. If so, the packetizer checks if I depends on any instruction J in
// the current packet. If no dependency is found, I is added to the current
// packet and the machine resources are marked as taken. If any dependency is
// found, a target
// API call is made to prune the dependence.
class VLIWPacketizerList {
protected:
MachineFunction &MF;
const TargetInstrInfo *TII;
// The VLIW Scheduler.
DefaultVLIWScheduler *VLIWScheduler;
// Vector of instructions assigned to the current packet.
std::vector<MachineInstr*> CurrentPacketMIs;
// DFA resource tracker.
DFAPacketizer *ResourceTracker;
// Generate MI -> SU map.
std::map<MachineInstr*, SUnit*> MIToSUnit;
public:
VLIWPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI, bool IsPostRA);
virtual ~VLIWPacketizerList();
// PacketizeMIs - Implement this API in the backend to bundle instructions.
void PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator BeginItr,
MachineBasicBlock::iterator EndItr);
// getResourceTracker - return ResourceTracker
DFAPacketizer *getResourceTracker() {return ResourceTracker;}
// addToPacket - Add MI to the current packet.
virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
MachineBasicBlock::iterator MII = MI;
CurrentPacketMIs.push_back(MI);
ResourceTracker->reserveResources(MI);
return MII;
}
// endPacket - End the current packet.
void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
// initPacketizerState - perform initialization before packetizing
// an instruction. This function is supposed to be overridden by
// the target dependent packetizer.
virtual void initPacketizerState() { return; }
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
virtual bool ignorePseudoInstruction(MachineInstr *I,
MachineBasicBlock *MBB) {
return false;
}
// isSoloInstruction - return true if instruction MI cannot be packetized
// with any other instruction, which means that MI itself is a packet.
virtual bool isSoloInstruction(MachineInstr *MI) {
return true;
}
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
return false;
}
// isLegalToPruneDependencies - Is it legal to prune dependence between SUI
// and SUJ.
virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
return false;
}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineInstrBundle.h | //===-- CodeGen/MachineInstBundle.h - MI bundle utilities -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provide utility functions to manipulate machine instruction
// bundles.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI,
MachineBasicBlock::instr_iterator LastMI);
/// finalizeBundle - Same functionality as the previous finalizeBundle except
/// the last instruction in the bundle is not provided as an input. This is
/// used in cases where bundles are pre-determined by marking instructions
/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
/// points to the end of the bundle.
MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI);
/// finalizeBundles - Finalize instruction bundles in the specified
/// MachineFunction. Return true if any bundles are finalized.
bool finalizeBundles(MachineFunction &MF);
/// getBundleStart - Returns the first instruction in the bundle containing MI.
///
inline MachineInstr *getBundleStart(MachineInstr *MI) {
MachineBasicBlock::instr_iterator I = MI;
while (I->isBundledWithPred())
--I;
return I;
}
inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
while (I->isBundledWithPred())
--I;
return I;
}
/// Return an iterator pointing beyond the bundle containing MI.
inline MachineBasicBlock::instr_iterator
getBundleEnd(MachineInstr *MI) {
MachineBasicBlock::instr_iterator I = MI;
while (I->isBundledWithSucc())
++I;
return ++I;
}
/// Return an iterator pointing beyond the bundle containing MI.
inline MachineBasicBlock::const_instr_iterator
getBundleEnd(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
while (I->isBundledWithSucc())
++I;
return ++I;
}
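// Illustrative sketch: visiting every instruction in the bundle containing
// MI with the helpers above ('visit' is a hypothetical callback):
//
//   for (MachineBasicBlock::instr_iterator I = getBundleStart(MI),
//                                          E = getBundleEnd(MI);
//        I != E; ++I)
//     visit(*I);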
//===----------------------------------------------------------------------===//
// MachineOperand iterator
//
/// MachineOperandIteratorBase - Iterator that can visit all operands on a
/// MachineInstr, or all operands on a bundle of MachineInstrs. This class is
/// not intended to be used directly, use one of the sub-classes instead.
///
/// Intended use:
///
/// for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
/// if (!MIO->isReg())
/// continue;
/// ...
/// }
///
class MachineOperandIteratorBase {
MachineBasicBlock::instr_iterator InstrI, InstrE;
MachineInstr::mop_iterator OpI, OpE;
// If the operands on InstrI are exhausted, advance InstrI to the next
// bundled instruction with operands.
void advance() {
while (OpI == OpE) {
// Don't advance off the basic block, or into a new bundle.
if (++InstrI == InstrE || !InstrI->isInsideBundle())
break;
OpI = InstrI->operands_begin();
OpE = InstrI->operands_end();
}
}
protected:
/// MachineOperandIteratorBase - Create an iterator that visits all operands
/// on MI, or all operands on every instruction in the bundle containing MI.
///
/// @param MI The instruction to examine.
/// @param WholeBundle When true, visit all operands on the entire bundle.
///
explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) {
if (WholeBundle) {
InstrI = getBundleStart(MI);
InstrE = MI->getParent()->instr_end();
} else {
InstrI = InstrE = MI;
++InstrE;
}
OpI = InstrI->operands_begin();
OpE = InstrI->operands_end();
if (WholeBundle)
advance();
}
MachineOperand &deref() const { return *OpI; }
public:
/// isValid - Returns true until all the operands have been visited.
bool isValid() const { return OpI != OpE; }
/// Preincrement. Move to the next operand.
void operator++() {
assert(isValid() && "Cannot advance MIOperands beyond the last operand");
++OpI;
advance();
}
/// getOperandNo - Returns the number of the current operand relative to its
/// instruction.
///
unsigned getOperandNo() const {
return OpI - InstrI->operands_begin();
}
/// VirtRegInfo - Information about a virtual register used by a set of operands.
///
struct VirtRegInfo {
/// Reads - One of the operands read the virtual register. This does not
/// include <undef> or <internal> use operands, see MO::readsReg().
bool Reads;
/// Writes - One of the operands writes the virtual register.
bool Writes;
/// Tied - Uses and defs must use the same register. This can be because of
/// a two-address constraint, or there may be a partial redefinition of a
/// sub-register.
bool Tied;
};
/// PhysRegInfo - Information about a physical register used by a set of
/// operands.
struct PhysRegInfo {
/// Clobbers - Reg or an overlapping register is defined, or a regmask
/// clobbers Reg.
bool Clobbers;
/// Defines - Reg or a super-register is defined.
bool Defines;
/// Reads - Reg or a super-register is read.
bool Reads;
/// ReadsOverlap - Reg or an overlapping register is read.
bool ReadsOverlap;
/// DefinesDead - All defs of a Reg or a super-register are dead.
bool DefinesDead;
/// There is a kill of Reg or a super-register.
bool Kills;
};
/// analyzeVirtReg - Analyze how the current instruction or bundle uses a
/// virtual register. This function should not be called after operator++(),
/// it expects a fresh iterator.
///
/// @param Reg The virtual register to analyze.
/// @param Ops When set, this vector will receive an (MI, OpNum) entry for
/// each operand referring to Reg.
/// @returns A filled-in VirtRegInfo struct.
VirtRegInfo analyzeVirtReg(unsigned Reg,
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
/// analyzePhysReg - Analyze how the current instruction or bundle uses a
/// physical register. This function should not be called after operator++(),
/// it expects a fresh iterator.
///
/// @param Reg The physical register to analyze.
/// @returns A filled-in PhysRegInfo struct.
PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
};
/// MIOperands - Iterate over operands of a single instruction.
///
class MIOperands : public MachineOperandIteratorBase {
public:
MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {}
MachineOperand &operator* () const { return deref(); }
MachineOperand *operator->() const { return &deref(); }
};
/// ConstMIOperands - Iterate over operands of a single const instruction.
///
class ConstMIOperands : public MachineOperandIteratorBase {
public:
ConstMIOperands(const MachineInstr *MI)
: MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {}
const MachineOperand &operator* () const { return deref(); }
const MachineOperand *operator->() const { return &deref(); }
};
/// MIBundleOperands - Iterate over all operands in a bundle of machine
/// instructions.
///
class MIBundleOperands : public MachineOperandIteratorBase {
public:
MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {}
MachineOperand &operator* () const { return deref(); }
MachineOperand *operator->() const { return &deref(); }
};
/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
/// machine instructions.
///
class ConstMIBundleOperands : public MachineOperandIteratorBase {
public:
ConstMIBundleOperands(const MachineInstr *MI)
: MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {}
const MachineOperand &operator* () const { return deref(); }
const MachineOperand *operator->() const { return &deref(); }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/CallingConvLower.h | //===-- llvm/CallingConvLower.h - Calling Conventions -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the CCState and CCValAssign classes, used for lowering
// and implementing calling conventions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
#define LLVM_CODEGEN_CALLINGCONVLOWER_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
namespace llvm {
class CCState;
class MVT;
class TargetMachine;
class TargetRegisterInfo;
/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
public:
enum LocInfo {
Full, // The value fills the full location.
SExt, // The value is sign extended in the location.
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
SExtUpper, // The value is in the upper bits of the location and should be
// sign extended when retrieved.
ZExtUpper, // The value is in the upper bits of the location and should be
// zero extended when retrieved.
AExtUpper, // The value is in the upper bits of the location and should be
// extended with undefined upper bits when retrieved.
BCvt, // The value is bit-converted in the location.
VExt, // The value is vector-widened in the location.
// FIXME: Not implemented yet. Code that uses AExt to mean
// vector-widen should be fixed to use VExt instead.
FPExt, // The floating-point value is fp-extended in the location.
Indirect // The location contains pointer to the value.
// TODO: a subset of the value is in the location.
};
private:
/// ValNo - This is the value number being assigned (e.g. an argument number).
unsigned ValNo;
/// Loc is either a stack offset or a register number.
unsigned Loc;
/// isMem - True if this is a memory loc, false if it is a register loc.
unsigned isMem : 1;
/// isCustom - True if this arg/retval requires special handling.
unsigned isCustom : 1;
/// Information about how the value is assigned.
LocInfo HTP : 6;
/// ValVT - The type of the value being assigned.
MVT ValVT;
/// LocVT - The type of the location being assigned to.
MVT LocVT;
public:
static CCValAssign getReg(unsigned ValNo, MVT ValVT,
unsigned RegNo, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret.ValNo = ValNo;
Ret.Loc = RegNo;
Ret.isMem = false;
Ret.isCustom = false;
Ret.HTP = HTP;
Ret.ValVT = ValVT;
Ret.LocVT = LocVT;
return Ret;
}
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
unsigned RegNo, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
Ret.isCustom = true;
return Ret;
}
static CCValAssign getMem(unsigned ValNo, MVT ValVT,
unsigned Offset, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret.ValNo = ValNo;
Ret.Loc = Offset;
Ret.isMem = true;
Ret.isCustom = false;
Ret.HTP = HTP;
Ret.ValVT = ValVT;
Ret.LocVT = LocVT;
return Ret;
}
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
unsigned Offset, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
Ret.isCustom = true;
return Ret;
}
// There is no need to differentiate between a pending CCValAssign and other
// kinds, as they are stored in a different list.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
LocInfo HTP, unsigned ExtraInfo = 0) {
return getReg(ValNo, ValVT, ExtraInfo, LocVT, HTP);
}
void convertToReg(unsigned RegNo) {
Loc = RegNo;
isMem = false;
}
void convertToMem(unsigned Offset) {
Loc = Offset;
isMem = true;
}
unsigned getValNo() const { return ValNo; }
MVT getValVT() const { return ValVT; }
bool isRegLoc() const { return !isMem; }
bool isMemLoc() const { return isMem; }
bool needsCustom() const { return isCustom; }
unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
unsigned getExtraInfo() const { return Loc; }
MVT getLocVT() const { return LocVT; }
LocInfo getLocInfo() const { return HTP; }
bool isExtInLoc() const {
return (HTP == AExt || HTP == SExt || HTP == ZExt);
}
bool isUpperBitsInLoc() const {
return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
}
};
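// Illustrative sketch (hypothetical register number): a CCAssignFn might
// record that value #0, an i32, was assigned to physical register 7:
//
//   CCValAssign VA = CCValAssign::getReg(/*ValNo=*/0, MVT::i32,
//                                        /*RegNo=*/7, MVT::i32,
//                                        CCValAssign::Full);
//   assert(VA.isRegLoc() && VA.getLocReg() == 7);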
/// Describes a register that needs to be forwarded from the prologue to a
/// musttail call.
struct ForwardedRegister {
ForwardedRegister(unsigned VReg, MCPhysReg PReg, MVT VT)
: VReg(VReg), PReg(PReg), VT(VT) {}
unsigned VReg;
MCPhysReg PReg;
MVT VT;
};
/// CCAssignFn - This function assigns a location for Val, updating State to
/// reflect the change. It returns 'true' if it failed to handle Val.
typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State);
/// CCCustomFn - This function assigns a location for Val, possibly updating
/// all args to reflect changes and indicates if it handled it. It must set
/// isCustom if it handles the arg and returns true.
typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State);
/// ParmContext - This enum tracks whether calling convention lowering is in
/// the context of prologue or call generation. Not all backends make use of
/// this information.
typedef enum { Unknown, Prologue, Call } ParmContext;
/// CCState - This class holds information needed while lowering arguments and
/// return values. It captures which registers are already assigned and which
/// stack slots are used. It provides accessors to allocate these values.
class CCState {
private:
CallingConv::ID CallingConv;
bool IsVarArg;
MachineFunction &MF;
const TargetRegisterInfo &TRI;
SmallVectorImpl<CCValAssign> &Locs;
LLVMContext &Context;
unsigned StackOffset;
SmallVector<uint32_t, 16> UsedRegs;
SmallVector<CCValAssign, 4> PendingLocs;
// ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
//
// Vector of ByValInfo instances (ByValRegs) is introduced for byval registers
// tracking.
// Or, in another words it tracks byval parameters that are stored in
// general purpose registers.
//
// For 4 byte stack alignment,
// instance index means byval parameter number in formal
// arguments set. Assume, we have some "struct_type" with size = 4 bytes,
// then, for function "foo":
//
// i32 foo(i32 %p, %struct_type* %r, i32 %s, %struct_type* %t)
//
// ByValRegs[0] describes how "%r" is stored (Begin == r1, End == r2)
// ByValRegs[1] describes how "%t" is stored (Begin == r3, End == r4).
//
// In case of 8 bytes stack alignment,
// ByValRegs may also contain information about wasted registers.
// In function shown above, r3 would be wasted according to AAPCS rules.
// And in that case ByValRegs[1].Waste would be "true".
// ByValRegs vector size still would be 2,
// while "%t" goes to the stack: it wouldn't be described in ByValRegs.
//
// Supposed use-case for this collection:
// 1. Initially ByValRegs is empty, InRegsParamsProcessed is 0.
// 2. HandleByVal fills up ByValRegs.
// 3. Argument analysis (LowerFormalArguments, for example). After
// some byval argument was analyzed, InRegsParamsProcessed is increased.
struct ByValInfo {
ByValInfo(unsigned B, unsigned E, bool IsWaste = false) :
Begin(B), End(E), Waste(IsWaste) {}
// First register allocated for current parameter.
unsigned Begin;
// First after last register allocated for current parameter.
unsigned End;
// Means that current range of registers doesn't belong to any
// parameters. It was wasted due to stack alignment rules.
// For more information see:
// AAPCS, 5.5 Parameter Passing, Stage C, C.3.
bool Waste;
};
SmallVector<ByValInfo, 4 > ByValRegs;
// InRegsParamsProcessed - shows how many instances of ByValRegs were
// processed during argument analysis.
unsigned InRegsParamsProcessed;
protected:
ParmContext CallOrPrologue;
public:
CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
SmallVectorImpl<CCValAssign> &locs, LLVMContext &C);
void addLoc(const CCValAssign &V) {
Locs.push_back(V);
}
LLVMContext &getContext() const { return Context; }
MachineFunction &getMachineFunction() const { return MF; }
CallingConv::ID getCallingConv() const { return CallingConv; }
bool isVarArg() const { return IsVarArg; }
unsigned getNextStackOffset() const { return StackOffset; }
/// isAllocated - Return true if the specified register (or an alias) is
/// allocated.
bool isAllocated(unsigned Reg) const {
return UsedRegs[Reg/32] & (1 << (Reg&31));
}
/// AnalyzeFormalArguments - Analyze an array of argument values,
/// incorporating info about the formals into this state.
void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
/// AnalyzeReturn - Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn);
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn);
/// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags.
void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn);
/// AnalyzeCallResult - Analyze the return values of a call,
/// incorporating info about the passed values into this state.
void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
/// getFirstUnallocated - Return the index of the first unallocated register
/// in the set, or Regs.size() if they are all allocated.
unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
for (unsigned i = 0; i < Regs.size(); ++i)
if (!isAllocated(Regs[i]))
return i;
return Regs.size();
}
/// AllocateReg - Attempt to allocate one register. If it is not available,
/// return zero. Otherwise, return the register, marking it and any aliases
/// as allocated.
unsigned AllocateReg(unsigned Reg) {
if (isAllocated(Reg)) return 0;
MarkAllocated(Reg);
return Reg;
}
/// Version of AllocateReg with extra register to be shadowed.
unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
if (isAllocated(Reg)) return 0;
MarkAllocated(Reg);
MarkAllocated(ShadowReg);
return Reg;
}
/// AllocateReg - Attempt to allocate one of the specified registers. If none
/// are available, return zero. Otherwise, return the first one available,
/// marking it and any aliases as allocated.
unsigned AllocateReg(ArrayRef<MCPhysReg> Regs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs);
if (FirstUnalloc == Regs.size())
return 0; // Didn't find the reg.
// Mark the register and any aliases as allocated.
unsigned Reg = Regs[FirstUnalloc];
MarkAllocated(Reg);
return Reg;
}
/// AllocateRegBlock - Attempt to allocate a block of RegsRequired consecutive
/// registers. If this is not possible, return zero. Otherwise, return the first
/// register of the block that was allocated, marking the entire block as allocated.
unsigned AllocateRegBlock(ArrayRef<uint16_t> Regs, unsigned RegsRequired) {
if (RegsRequired > Regs.size())
return 0;
for (unsigned StartIdx = 0; StartIdx <= Regs.size() - RegsRequired;
++StartIdx) {
bool BlockAvailable = true;
// Check for already-allocated regs in this block
for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
if (isAllocated(Regs[StartIdx + BlockIdx])) {
BlockAvailable = false;
break;
}
}
if (BlockAvailable) {
// Mark the entire block as allocated
for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
MarkAllocated(Regs[StartIdx + BlockIdx]);
}
return Regs[StartIdx];
}
}
// No block was available
return 0;
}
/// Version of AllocateReg with list of registers to be shadowed.
unsigned AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs);
if (FirstUnalloc == Regs.size())
return 0; // Didn't find the reg.
// Mark the register and any aliases as allocated.
unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
MarkAllocated(Reg);
MarkAllocated(ShadowReg);
return Reg;
}
/// AllocateStack - Allocate a chunk of stack space with the specified size
/// and alignment.
unsigned AllocateStack(unsigned Size, unsigned Align) {
assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
StackOffset = ((StackOffset + Align - 1) & ~(Align - 1));
unsigned Result = StackOffset;
StackOffset += Size;
MF.getFrameInfo()->ensureMaxAlignment(Align);
return Result;
}
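// Worked example (illustrative): with StackOffset == 4, AllocateStack(4, 8)
// rounds the offset up to 8, returns 8, and leaves StackOffset == 12 for
// the next allocation.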
/// Version of AllocateStack with extra register to be shadowed.
unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
MarkAllocated(ShadowReg);
return AllocateStack(Size, Align);
}
/// Version of AllocateStack with list of extra registers to be shadowed.
/// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
unsigned AllocateStack(unsigned Size, unsigned Align,
ArrayRef<MCPhysReg> ShadowRegs) {
for (unsigned i = 0; i < ShadowRegs.size(); ++i)
MarkAllocated(ShadowRegs[i]);
return AllocateStack(Size, Align);
}
// HandleByVal - Allocate a stack slot large enough to pass an argument by
// value. The size and alignment information of the argument is encoded in its
// parameter attribute.
void HandleByVal(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
// Returns count of byval arguments that are to be stored (even partly)
// in registers.
unsigned getInRegsParamsCount() const { return ByValRegs.size(); }
// Returns the count of byval in-regs arguments processed.
unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }
// Get information about N-th byval parameter that is stored in registers.
// Here "ByValParamIndex" is N.
void getInRegsParamInfo(unsigned InRegsParamRecordIndex,
unsigned& BeginReg, unsigned& EndReg) const {
assert(InRegsParamRecordIndex < ByValRegs.size() &&
"Wrong ByVal parameter index");
const ByValInfo& info = ByValRegs[InRegsParamRecordIndex];
BeginReg = info.Begin;
EndReg = info.End;
}
// Add information about parameter that is kept in registers.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd) {
ByValRegs.push_back(ByValInfo(RegBegin, RegEnd));
}
// Goes either to the next byval parameter (excluding "waste" records), or
// to the end of the collection.
// Returns false if the end is reached.
bool nextInRegsParam() {
unsigned e = ByValRegs.size();
if (InRegsParamsProcessed < e)
++InRegsParamsProcessed;
return InRegsParamsProcessed < e;
}
// Clear byval registers tracking info.
void clearByValRegsInfo() {
InRegsParamsProcessed = 0;
ByValRegs.clear();
}
// Rewind byval registers tracking info.
void rewindByValRegsInfo() {
InRegsParamsProcessed = 0;
}
ParmContext getCallOrPrologue() const { return CallOrPrologue; }
// Get list of pending assignments
SmallVectorImpl<llvm::CCValAssign> &getPendingLocs() {
return PendingLocs;
}
/// Compute the remaining unused register parameters that would be used for
/// the given value type. This is useful when varargs are passed in the
/// registers that normal prototyped parameters would be passed in, or for
/// implementing perfect forwarding.
void getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, MVT VT,
CCAssignFn Fn);
/// Compute the set of registers that need to be preserved and forwarded to
/// any musttail calls.
void analyzeMustTailForwardedRegisters(
SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
CCAssignFn Fn);
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void MarkAllocated(unsigned Reg);
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineSSAUpdater.h | //===-- MachineSSAUpdater.h - Unstructured SSA Update Tool ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachineSSAUpdater class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
#define LLVM_CODEGEN_MACHINESSAUPDATER_H
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;
template<typename T> class SmallVectorImpl;
template<typename T> class SSAUpdaterTraits;
/// MachineSSAUpdater - This class updates SSA form for a set of virtual
/// registers defined in multiple blocks. This is used when code duplication
/// or another unstructured transformation wants to rewrite a set of uses of one
/// vreg with uses of a set of vregs.
class MachineSSAUpdater {
friend class SSAUpdaterTraits<MachineSSAUpdater>;
private:
/// AvailableVals - This keeps track of which value to use on a per-block
/// basis. When we insert PHI nodes, we keep track of them here.
//typedef DenseMap<MachineBasicBlock*, unsigned > AvailableValsTy;
void *AV;
/// VR - Current virtual register whose uses are being updated.
unsigned VR;
/// VRC - Register class of the current virtual register.
const TargetRegisterClass *VRC;
/// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
/// nodes that it creates to the vector.
SmallVectorImpl<MachineInstr*> *InsertedPHIs;
const TargetInstrInfo *TII;
MachineRegisterInfo *MRI;
public:
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
/// filled in with all PHI Nodes created by rewriting.
explicit MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
~MachineSSAUpdater();
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates.
void Initialize(unsigned V);
/// AddAvailableValue - Indicate that a rewritten value is available at the
/// end of the specified block with the specified value.
void AddAvailableValue(MachineBasicBlock *BB, unsigned V);
/// HasValueForBlock - Return true if the MachineSSAUpdater already has a
/// value for the specified block.
bool HasValueForBlock(MachineBasicBlock *BB) const;
/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
/// live at the end of the specified block.
unsigned GetValueAtEndOfBlock(MachineBasicBlock *BB);
/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
/// is live in the middle of the specified block.
///
/// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
/// important case: if there is a definition of the rewritten value after the
/// 'use' in BB. Consider code like this:
///
/// X1 = ...
/// SomeBB:
/// use(X)
/// X2 = ...
/// br Cond, SomeBB, OutBB
///
/// In this case, there are two values (X1 and X2) added to the AvailableVals
/// set by the client of the rewriter, and those values are both live out of
/// their respective blocks. However, the use of X happens in the *middle* of
/// a block. Because of this, we need to insert a new PHI node in SomeBB to
/// merge the appropriate values, and this value isn't live out of the block.
///
unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB);
/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
/// which use their value in the corresponding predecessor. Note that this
/// will not work if the use is supposed to be rewritten to a value defined in
/// the same block as the use, but above it. Any 'AddAvailableValue's added
/// for the use's block will be considered to be below it.
void RewriteUse(MachineOperand &U);
private:
unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
void operator=(const MachineSSAUpdater&) = delete;
MachineSSAUpdater(const MachineSSAUpdater&) = delete;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveInterval.h | //===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveRange and LiveInterval classes. Given some
// numbering of each of the machine instructions, an interval [i, j) is said to be a
// live range for register v if there is no instruction with number j' >= j
// such that v is live at j' and there is no instruction with number i' < i such
// that v is live at i'. In this implementation ranges can have holes,
// i.e. a range might look like [1,20), [50,65), [1000,1001). Each
// individual segment is represented as an instance of LiveRange::Segment,
// and the whole range is represented as an instance of LiveRange.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <climits>
#include <set>
namespace llvm {
class CoalescerPair;
class LiveIntervals;
class MachineInstr;
class MachineRegisterInfo;
class TargetRegisterInfo;
class raw_ostream;
template <typename T, unsigned Small> class SmallPtrSet;
/// VNInfo - Value Number Information.
/// This class holds information about a machine level value, including its
/// definition and use points.
///
class VNInfo {
public:
typedef BumpPtrAllocator Allocator;
/// The ID number of this value.
unsigned id;
/// The index of the defining instruction.
SlotIndex def;
/// VNInfo constructor.
VNInfo(unsigned i, SlotIndex d)
: id(i), def(d)
{ }
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
: id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
def = src.def;
}
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
/// PHI-defs begin at a block boundary, all other defs begin at register or
/// EC slots.
bool isPHIDef() const { return def.isBlock(); }
/// Returns true if this value is unused.
bool isUnused() const { return !def.isValid(); }
/// Mark this value as unused.
void markUnused() { def = SlotIndex(); }
};
/// Result of a LiveRange query. This class hides the implementation details
/// of live ranges, and it should be used as the primary interface for
/// examining live ranges around instructions.
class LiveQueryResult {
VNInfo *const EarlyVal;
VNInfo *const LateVal;
const SlotIndex EndPoint;
const bool Kill;
public:
LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint,
bool Kill)
: EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill)
{}
/// Return the value that is live-in to the instruction. This is the value
/// that will be read by the instruction's use operands. Return NULL if no
/// value is live-in.
VNInfo *valueIn() const {
return EarlyVal;
}
/// Return true if the live-in value is killed by this instruction. This
/// means that either the live range ends at the instruction, or it changes
/// value.
bool isKill() const {
return Kill;
}
/// Return true if this instruction has a dead def.
bool isDeadDef() const {
return EndPoint.isDead();
}
/// Return the value leaving the instruction, if any. This can be a
/// live-through value, or a live def. A dead def returns NULL.
VNInfo *valueOut() const {
return isDeadDef() ? nullptr : LateVal;
}
/// Returns the value alive at the end of the instruction, if any. This can
/// be a live-through value, a live def or a dead def.
VNInfo *valueOutOrDead() const {
return LateVal;
}
/// Return the value defined by this instruction, if any. This includes
/// dead defs, it is the value created by the instruction's def operands.
VNInfo *valueDefined() const {
return EarlyVal == LateVal ? nullptr : LateVal;
}
/// Return the end point of the last live range segment to interact with
/// the instruction, if any.
///
/// The end point is an invalid SlotIndex only if the live range doesn't
/// intersect the instruction at all.
///
/// The end point may be at or past the end of the instruction's basic
/// block. That means the value was live out of the block.
SlotIndex endPoint() const {
return EndPoint;
}
};
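//
// Example usage (an illustrative sketch, not part of the API; assumes LR is
// a computed LiveRange and Idx is the SlotIndex of an instruction):
//
//   LiveQueryResult LRQ = LR.Query(Idx);
//   if (VNInfo *VNI = LRQ.valueIn()) {
//     // VNI is the value read by the instruction's use operands.
//     if (LRQ.isKill())
//       ; // The live range ends or changes value at this instruction.
//   }
//   if (VNInfo *DefVNI = LRQ.valueDefined())
//     ; // The instruction defines DefVNI (possibly a dead def).
//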
/// This class represents the liveness of a register, stack slot, etc.
/// It manages an ordered list of Segment objects.
/// The Segments are organized in a static single assignment form: At places
/// where a new value is defined or different values reach a CFG join, a new
/// segment with a new value number is used.
class LiveRange {
public:
/// This represents a simple continuous liveness interval for a value.
/// The start point is inclusive, the end point exclusive. These intervals
/// are rendered as [start,end).
struct Segment {
SlotIndex start; // Start point of the interval (inclusive)
SlotIndex end; // End point of the interval (exclusive)
VNInfo *valno; // identifier for the value contained in this segment.
Segment() : valno(nullptr) {}
Segment(SlotIndex S, SlotIndex E, VNInfo *V)
: start(S), end(E), valno(V) {
assert(S < E && "Cannot create empty or backwards segment");
}
/// Return true if the index is covered by this segment.
bool contains(SlotIndex I) const {
return start <= I && I < end;
}
/// Return true if the given interval, [S, E), is covered by this segment.
bool containsInterval(SlotIndex S, SlotIndex E) const {
assert((S < E) && "Backwards interval?");
return (start <= S && S < end) && (start < E && E <= end);
}
bool operator<(const Segment &Other) const {
return std::tie(start, end) < std::tie(Other.start, Other.end);
}
bool operator==(const Segment &Other) const {
return start == Other.start && end == Other.end;
}
void dump() const;
};
typedef SmallVector<Segment,4> Segments;
typedef SmallVector<VNInfo*,4> VNInfoList;
Segments segments; // the liveness segments
VNInfoList valnos; // value#'s
// The segment set is used temporarily to accelerate initial computation
// of live ranges of physical registers in computeRegUnitRange.
// After that the set is flushed to the segment vector and deleted.
typedef std::set<Segment> SegmentSet;
std::unique_ptr<SegmentSet> segmentSet;
typedef Segments::iterator iterator;
iterator begin() { return segments.begin(); }
iterator end() { return segments.end(); }
typedef Segments::const_iterator const_iterator;
const_iterator begin() const { return segments.begin(); }
const_iterator end() const { return segments.end(); }
typedef VNInfoList::iterator vni_iterator;
vni_iterator vni_begin() { return valnos.begin(); }
vni_iterator vni_end() { return valnos.end(); }
typedef VNInfoList::const_iterator const_vni_iterator;
const_vni_iterator vni_begin() const { return valnos.begin(); }
const_vni_iterator vni_end() const { return valnos.end(); }
/// Constructs a new LiveRange object.
LiveRange(bool UseSegmentSet = false)
: segmentSet(UseSegmentSet ? llvm::make_unique<SegmentSet>()
: nullptr) {}
/// Constructs a new LiveRange object by copying segments and valnos from
/// another LiveRange.
LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) {
assert(Other.segmentSet == nullptr &&
"Copying of LiveRanges with active SegmentSets is not supported");
// Duplicate valnos.
for (const VNInfo *VNI : Other.valnos) {
createValueCopy(VNI, Allocator);
}
// Now we can copy segments and remap their valnos.
for (const Segment &S : Other.segments) {
segments.push_back(Segment(S.start, S.end, valnos[S.valno->id]));
}
}
/// advanceTo - Advance the specified iterator to point to the Segment
/// containing the specified position, or end() if the position is past the
/// end of the range. If no Segment contains this position, but the
/// position is in a hole, this method returns an iterator pointing to the
/// Segment immediately after the hole.
iterator advanceTo(iterator I, SlotIndex Pos) {
assert(I != end());
if (Pos >= endIndex())
return end();
while (I->end <= Pos) ++I;
return I;
}
const_iterator advanceTo(const_iterator I, SlotIndex Pos) const {
assert(I != end());
if (Pos >= endIndex())
return end();
while (I->end <= Pos) ++I;
return I;
}
/// find - Return an iterator pointing to the first segment that ends after
/// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
/// when searching large ranges.
///
/// If Pos is contained in a Segment, that segment is returned.
/// If Pos is in a hole, the following Segment is returned.
/// If Pos is beyond endIndex, end() is returned.
iterator find(SlotIndex Pos);
const_iterator find(SlotIndex Pos) const {
return const_cast<LiveRange*>(this)->find(Pos);
}
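// For example (an illustrative sketch): visit every segment overlapping the
// interval [A, B), using find() to skip segments that end at or before A.
//
//   for (const_iterator I = find(A), E = end(); I != E && I->start < B; ++I)
//     ; // *I overlaps [A, B).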
void clear() {
valnos.clear();
segments.clear();
}
size_t size() const {
return segments.size();
}
bool hasAtLeastOneValue() const { return !valnos.empty(); }
bool containsOneValue() const { return valnos.size() == 1; }
unsigned getNumValNums() const { return (unsigned)valnos.size(); }
/// getValNumInfo - Returns pointer to the specified val#.
///
inline VNInfo *getValNumInfo(unsigned ValNo) {
return valnos[ValNo];
}
inline const VNInfo *getValNumInfo(unsigned ValNo) const {
return valnos[ValNo];
}
/// containsValue - Returns true if VNI belongs to this range.
bool containsValue(const VNInfo *VNI) const {
return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
}
/// getNextValue - Create a new value number and return it. @p def specifies
/// the instruction that defines the value number.
VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
valnos.push_back(VNI);
return VNI;
}
/// createDeadDef - Make sure the range has a value defined at Def.
/// If one already exists, return it. Otherwise allocate a new value and
/// add liveness for a dead def.
VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
valnos.push_back(VNI);
return VNI;
}
/// RenumberValues - Renumber all values in order of appearance and remove
/// unused values.
void RenumberValues();
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// segments with the V1 value number with the V2 value number. This can
/// cause merging of V1/V2 value numbers and compaction of the value space.
VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
/// Merge all of the live segments of a specific val# in RHS into this live
/// range as the specified value number. The segments in RHS are allowed
/// to overlap with segments in the current range; it will replace the
/// value numbers of the overlapping live segments with the specified value
/// number.
void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo);
/// MergeValueInAsValue - Merge all of the segments of a specific val#
/// in RHS into this live range as the specified value number.
/// The segments in RHS are allowed to overlap with segments in the
/// current range, but only if the overlapping segments have the
/// specified value number.
void MergeValueInAsValue(const LiveRange &RHS,
const VNInfo *RHSValNo, VNInfo *LHSValNo);
bool empty() const { return segments.empty(); }
/// beginIndex - Return the lowest numbered slot covered.
SlotIndex beginIndex() const {
assert(!empty() && "Call to beginIndex() on empty range.");
return segments.front().start;
}
/// endIndex - Return the maximum point of the whole range,
/// exclusive.
SlotIndex endIndex() const {
assert(!empty() && "Call to endIndex() on empty range.");
return segments.back().end;
}
bool expiredAt(SlotIndex index) const {
return index >= endIndex();
}
bool liveAt(SlotIndex index) const {
const_iterator r = find(index);
return r != end() && r->start <= index;
}
/// Return the segment that contains the specified index, or null if there
/// is none.
const Segment *getSegmentContaining(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
return I == end() ? nullptr : &*I;
}
/// Return the live segment that contains the specified index, or null if
/// there is none.
Segment *getSegmentContaining(SlotIndex Idx) {
iterator I = FindSegmentContaining(Idx);
return I == end() ? nullptr : &*I;
}
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
return I == end() ? nullptr : I->valno;
}
/// getVNInfoBefore - Return the VNInfo that is live up to but not
/// necessarily including Idx, or NULL. Use this to find the reaching def
/// used by an instruction at this SlotIndex position.
VNInfo *getVNInfoBefore(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
return I == end() ? nullptr : I->valno;
}
/// Return an iterator to the segment that contains the specified index, or
/// end() if there is none.
iterator FindSegmentContaining(SlotIndex Idx) {
iterator I = find(Idx);
return I != end() && I->start <= Idx ? I : end();
}
const_iterator FindSegmentContaining(SlotIndex Idx) const {
const_iterator I = find(Idx);
return I != end() && I->start <= Idx ? I : end();
}
/// overlaps - Return true if the intersection of the two live ranges is
/// not empty.
bool overlaps(const LiveRange &other) const {
if (other.empty())
return false;
return overlapsFrom(other, other.begin());
}
/// overlaps - Return true if the two ranges have overlapping segments
/// that are not coalescable according to CP.
///
/// Overlapping segments where one range is defined by a coalescable
/// copy are allowed.
bool overlaps(const LiveRange &Other, const CoalescerPair &CP,
const SlotIndexes&) const;
/// overlaps - Return true if the live range overlaps an interval specified
/// by [Start, End).
bool overlaps(SlotIndex Start, SlotIndex End) const;
/// overlapsFrom - Return true if the intersection of the two live ranges
/// is not empty. The specified iterator is a hint that we can begin
/// scanning the Other range starting at I.
bool overlapsFrom(const LiveRange &Other, const_iterator I) const;
/// Returns true if all segments of the @p Other live range are completely
/// covered by this live range.
/// Adjacent live ranges do not affect the covering: the live range
/// [1,5](5,10] covers (3,7].
bool covers(const LiveRange &Other) const;
/// Add the specified Segment to this range, merging segments as
/// appropriate. This returns an iterator to the inserted segment (which
/// may have grown since it was inserted).
iterator addSegment(Segment S);
/// If this range is live before @p Use in the basic block that starts at
/// @p StartIdx, extend it to be live up to @p Use, and return the value. If
/// there is no segment before @p Use, return nullptr.
VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use);
/// join - Join two live ranges (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS ranges as specified. If
/// the ranges are not joinable, this aborts.
void join(LiveRange &Other,
const int *ValNoAssignments,
const int *RHSValNoAssignments,
SmallVectorImpl<VNInfo *> &NewVNInfo);
/// True iff this live range is a single segment that lies between the
/// specified boundaries, exclusively. Vregs live across a backedge are not
/// considered local. The boundaries are expected to lie within an extended
/// basic block, so vregs that are not live out should contain no holes.
bool isLocal(SlotIndex Start, SlotIndex End) const {
return beginIndex() > Start.getBaseIndex() &&
endIndex() < End.getBoundaryIndex();
}
/// Remove the specified segment from this range. Note that the segment
/// must be a single Segment in its entirety.
void removeSegment(SlotIndex Start, SlotIndex End,
bool RemoveDeadValNo = false);
void removeSegment(Segment S, bool RemoveDeadValNo = false) {
removeSegment(S.start, S.end, RemoveDeadValNo);
}
/// Remove segment pointed to by iterator @p I from this range. This does
/// not remove dead value numbers.
iterator removeSegment(iterator I) {
return segments.erase(I);
}
/// Query Liveness at Idx.
/// The sub-instruction slot of Idx doesn't matter, only the instruction
/// it refers to is considered.
LiveQueryResult Query(SlotIndex Idx) const {
// Find the segment that enters the instruction.
const_iterator I = find(Idx.getBaseIndex());
const_iterator E = end();
if (I == E)
return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
// Is this an instruction live-in segment?
// If Idx is the start index of a basic block, include live-in segments
// that start at Idx.getBaseIndex().
VNInfo *EarlyVal = nullptr;
VNInfo *LateVal = nullptr;
SlotIndex EndPoint;
bool Kill = false;
if (I->start <= Idx.getBaseIndex()) {
EarlyVal = I->valno;
EndPoint = I->end;
// Move to the potentially live-out segment.
if (SlotIndex::isSameInstr(Idx, I->end)) {
Kill = true;
if (++I == E)
return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
}
// Special case: A PHIDef value can have its def in the middle of a
// segment if the value happens to be live out of the layout
// predecessor.
// Such a value is not live-in.
if (EarlyVal->def == Idx.getBaseIndex())
EarlyVal = nullptr;
}
// I now points to the segment that may be live-through, or defined by
// this instr. Ignore segments starting after the current instr.
if (!SlotIndex::isEarlierInstr(Idx, I->start)) {
LateVal = I->valno;
EndPoint = I->end;
}
return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
}
/// removeValNo - Remove all the segments defined by the specified value#.
/// Also remove the value# from value# list.
void removeValNo(VNInfo *ValNo);
/// Returns true if the live range is zero length, i.e. no live segments
/// span instructions. It doesn't pay to spill such a range.
bool isZeroLength(SlotIndexes *Indexes) const {
for (const Segment &S : segments)
if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() <
S.end.getBaseIndex())
return false;
return true;
}
bool operator<(const LiveRange& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
return thisIndex < otherIndex;
}
/// Flush the segment set into the regular segment vector.
/// This method is to be called after the live range has been created, if
/// use of the segment set was activated in the constructor of the live
/// range.
void flushSegmentSet();
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Walk the range and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
void verify() const {}
#else
void verify() const;
#endif
protected:
/// Append a segment to the list of segments.
void append(const LiveRange::Segment S);
private:
friend class LiveRangeUpdater;
void addSegmentToSet(Segment S);
void markValNoForDeletion(VNInfo *V);
};
inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
LR.print(OS);
return OS;
}
/// LiveInterval - This class represents the liveness of a register,
/// or stack slot.
class LiveInterval : public LiveRange {
public:
typedef LiveRange super;
/// A live range for subregisters. The LaneMask specifies which parts of the
/// super register are covered by the interval.
/// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
class SubRange : public LiveRange {
public:
SubRange *Next;
unsigned LaneMask;
/// Constructs a new SubRange object.
SubRange(unsigned LaneMask)
: Next(nullptr), LaneMask(LaneMask) {
}
/// Constructs a new SubRange object by copying liveness from @p Other.
SubRange(unsigned LaneMask, const LiveRange &Other,
BumpPtrAllocator &Allocator)
: LiveRange(Other, Allocator), Next(nullptr), LaneMask(LaneMask) {
}
};
private:
SubRange *SubRanges; ///< Single linked list of subregister live ranges.
public:
const unsigned reg; // the register or stack slot of this interval.
float weight; // weight of this interval
LiveInterval(unsigned Reg, float Weight)
: SubRanges(nullptr), reg(Reg), weight(Weight) {}
~LiveInterval() {
clearSubRanges();
}
template<typename T>
class SingleLinkedListIterator {
T *P;
public:
SingleLinkedListIterator<T>(T *P) : P(P) {}
SingleLinkedListIterator<T> &operator++() {
P = P->Next;
return *this;
}
SingleLinkedListIterator<T> &operator++(int) {
SingleLinkedListIterator res = *this;
++*this;
return res;
}
bool operator!=(const SingleLinkedListIterator<T> &Other) {
return P != Other.operator->();
}
bool operator==(const SingleLinkedListIterator<T> &Other) {
return P == Other.operator->();
}
T &operator*() const {
return *P;
}
T *operator->() const {
return P;
}
};
typedef SingleLinkedListIterator<SubRange> subrange_iterator;
subrange_iterator subrange_begin() {
return subrange_iterator(SubRanges);
}
subrange_iterator subrange_end() {
return subrange_iterator(nullptr);
}
typedef SingleLinkedListIterator<const SubRange> const_subrange_iterator;
const_subrange_iterator subrange_begin() const {
return const_subrange_iterator(SubRanges);
}
const_subrange_iterator subrange_end() const {
return const_subrange_iterator(nullptr);
}
iterator_range<subrange_iterator> subranges() {
return make_range(subrange_begin(), subrange_end());
}
iterator_range<const_subrange_iterator> subranges() const {
return make_range(subrange_begin(), subrange_end());
}
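// For example (an illustrative sketch; LI and UsedLanes are assumed names):
//
//   for (LiveInterval::SubRange &SR : LI.subranges())
//     if (SR.LaneMask & UsedLanes)
//       ; // SR behaves like an ordinary LiveRange for these lanes.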
/// Creates a new empty subregister live range. The range is added at the
/// beginning of the subrange list; subrange iterators stay valid.
SubRange *createSubRange(BumpPtrAllocator &Allocator, unsigned LaneMask) {
SubRange *Range = new (Allocator) SubRange(LaneMask);
appendSubRange(Range);
return Range;
}
/// Like createSubRange() but the new range is filled with a copy of the
/// liveness information in @p CopyFrom.
SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator, unsigned LaneMask,
const LiveRange &CopyFrom) {
SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator);
appendSubRange(Range);
return Range;
}
/// Returns true if subregister liveness information is available.
bool hasSubRanges() const {
return SubRanges != nullptr;
}
/// Removes all subregister liveness information.
void clearSubRanges();
/// Removes all subranges without any segments (subranges without segments
/// are not considered valid and should only exist temporarily).
void removeEmptySubRanges();
/// Construct main live range by merging the SubRanges of @p LI.
void constructMainRangeFromSubranges(const SlotIndexes &Indexes,
VNInfo::Allocator &VNIAllocator);
/// getSize - Returns the sum of the sizes of all the LiveRanges.
///
unsigned getSize() const;
/// isSpillable - Can this interval be spilled?
bool isSpillable() const {
return weight != llvm::huge_valf;
}
/// markNotSpillable - Mark interval as not spillable
void markNotSpillable() {
weight = llvm::huge_valf;
}
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
return std::tie(thisIndex, reg) < std::tie(otherIndex, other.reg);
}
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Walks the interval and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
void verify(const MachineRegisterInfo *MRI = nullptr) const {}
#else
void verify(const MachineRegisterInfo *MRI = nullptr) const;
#endif
private:
/// Appends @p Range to SubRanges list.
void appendSubRange(SubRange *Range) {
Range->Next = SubRanges;
SubRanges = Range;
}
/// Free memory held by SubRange.
void freeSubRange(SubRange *S);
};
inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
LI.print(OS);
return OS;
}
raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S);
inline bool operator<(SlotIndex V, const LiveRange::Segment &S) {
return V < S.start;
}
inline bool operator<(const LiveRange::Segment &S, SlotIndex V) {
return S.start < V;
}
/// Helper class for performant LiveRange bulk updates.
///
/// Calling LiveRange::addSegment() repeatedly can be expensive on large
/// live ranges because segments after the insertion point may need to be
/// shifted. The LiveRangeUpdater class can defer the shifting when adding
/// many segments in order.
///
/// The LiveRange will be in an invalid state until flush() is called.
class LiveRangeUpdater {
LiveRange *LR;
SlotIndex LastStart;
LiveRange::iterator WriteI;
LiveRange::iterator ReadI;
SmallVector<LiveRange::Segment, 16> Spills;
void mergeSpills();
public:
/// Create a LiveRangeUpdater for adding segments to LR.
/// LR will temporarily be in an invalid state until flush() is called.
LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
~LiveRangeUpdater() { flush(); }
/// Add a segment to LR and coalesce when possible, just like
/// LR.addSegment(). Segments should be added in increasing start order for
/// best performance.
void add(LiveRange::Segment);
void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
add(LiveRange::Segment(Start, End, VNI));
}
/// Return true if the LR is currently in an invalid state, and flush()
/// needs to be called.
bool isDirty() const { return LastStart.isValid(); }
/// Flush the updater state to LR so it is valid and contains all added
/// segments.
void flush();
/// Select a different destination live range.
void setDest(LiveRange *lr) {
if (LR != lr && isDirty())
flush();
LR = lr;
}
/// Get the current destination live range.
LiveRange *getDest() const { return LR; }
void dump() const;
void print(raw_ostream&) const;
};
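// Example (an illustrative sketch): bulk-populate LR from segments that are
// already sorted by increasing start index. NewSegments is an assumed name
// for the caller's input.
//
//   LiveRangeUpdater Updater(&LR);
//   for (const LiveRange::Segment &S : NewSegments)
//     Updater.add(S);
//   // Updater's destructor calls flush(), leaving LR valid and coalesced.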
inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
X.print(OS);
return OS;
}
/// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
/// LiveInterval into equivalence classes of connected components. A
/// LiveInterval that has multiple connected components can be broken into
/// multiple LiveIntervals.
///
/// Given a LiveInterval that may have multiple connected components, run:
///
/// unsigned numComps = ConEQ.Classify(LI);
/// if (numComps > 1) {
/// // allocate numComps-1 new LiveIntervals into LIS[1..]
/// ConEQ.Distribute(LIS);
/// }
class ConnectedVNInfoEqClasses {
LiveIntervals &LIS;
IntEqClasses EqClass;
// Note that values a and b are connected.
void Connect(unsigned a, unsigned b);
unsigned Renumber();
public:
explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}
/// Classify - Classify the values in LI into connected components.
/// Return the number of connected components.
unsigned Classify(const LiveInterval *LI);
/// getEqClass - Classify creates equivalence classes numbered 0..N. Return
/// the equivalence class assigned to the VNI.
unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }
/// Distribute - Distribute values in LIV[0] into a separate LiveInterval
/// for each connected component. LIV must have a LiveInterval for each
/// connected component. The LiveIntervals in LIV[1..] must be empty.
/// Instructions using LIV[0] are rewritten.
void Distribute(LiveInterval *LIV[], MachineRegisterInfo &MRI);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/DIEValue.def | //===- llvm/CodeGen/DIEValue.def - DIEValue types ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Macros for running through all types of DIEValue.
//
//===----------------------------------------------------------------------===//
#if !(defined HANDLE_DIEVALUE || defined HANDLE_DIEVALUE_SMALL || \
defined HANDLE_DIEVALUE_LARGE)
#error "Missing macro definition of HANDLE_DIEVALUE"
#endif
// Handler for all values.
#ifndef HANDLE_DIEVALUE
#define HANDLE_DIEVALUE(T)
#endif
// Handler for small values.
#ifndef HANDLE_DIEVALUE_SMALL
#define HANDLE_DIEVALUE_SMALL(T) HANDLE_DIEVALUE(T)
#endif
// Handler for large values.
#ifndef HANDLE_DIEVALUE_LARGE
#define HANDLE_DIEVALUE_LARGE(T) HANDLE_DIEVALUE(T)
#endif
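// For example (illustrative), a client defines one of the handlers before
// including this file; e.g. to expand the list into enumerators:
//
//   #define HANDLE_DIEVALUE(T) is##T,
//   #include "llvm/CodeGen/DIEValue.def"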
HANDLE_DIEVALUE_SMALL(Integer)
HANDLE_DIEVALUE_SMALL(String)
HANDLE_DIEVALUE_SMALL(Expr)
HANDLE_DIEVALUE_SMALL(Label)
HANDLE_DIEVALUE_LARGE(Delta)
HANDLE_DIEVALUE_SMALL(Entry)
HANDLE_DIEVALUE_SMALL(TypeSignature)
HANDLE_DIEVALUE_LARGE(Block)
HANDLE_DIEVALUE_LARGE(Loc)
HANDLE_DIEVALUE_SMALL(LocList)
#undef HANDLE_DIEVALUE
#undef HANDLE_DIEVALUE_SMALL
#undef HANDLE_DIEVALUE_LARGE
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineLoopInfo.h | //===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
// natural loops may actually be several loops that share the same header node.
//
// This analysis calculates the nesting structure of loops in a function. For
// each natural loop identified, this analysis identifies natural loops
// contained entirely within the loop and the basic blocks that make up the loop.
//
// It can calculate on the fly various bits of information, for example:
//
// * whether there is a preheader for the loop
// * the number of back edges to the header
// * whether or not a particular block branches out of the loop
// * the successor blocks of the loop
// * the loop depth
// * the trip count
// * etc...
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
#define LLVM_CODEGEN_MACHINELOOPINFO_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
// Implementation in LoopInfoImpl.h
class MachineLoop;
extern template class LoopBase<MachineBasicBlock, MachineLoop>;
class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
public:
MachineLoop();
/// getTopBlock - Return the "top" block in the loop, which is the first
/// block in the linear layout, ignoring any parts of the loop not
/// contiguous with the part that contains the header.
MachineBasicBlock *getTopBlock();
/// getBottomBlock - Return the "bottom" block in the loop, which is the last
/// block in the linear layout, ignoring any parts of the loop not
/// contiguous with the part that contains the header.
MachineBasicBlock *getBottomBlock();
void dump() const;
private:
friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;
explicit MachineLoop(MachineBasicBlock *MBB)
: LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
};
// Implementation in LoopInfoImpl.h
extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>;
class MachineLoopInfo : public MachineFunctionPass {
LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
friend class LoopBase<MachineBasicBlock, MachineLoop>;
void operator=(const MachineLoopInfo &) = delete;
MachineLoopInfo(const MachineLoopInfo &) = delete;
public:
static char ID; // Pass identification, replacement for typeid
MachineLoopInfo() : MachineFunctionPass(ID) {
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
}
LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
/// iterator/begin/end - The interface to the top-level loops in the current
/// function.
///
typedef LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator iterator;
inline iterator begin() const { return LI.begin(); }
inline iterator end() const { return LI.end(); }
bool empty() const { return LI.empty(); }
/// getLoopFor - Return the innermost loop that BB lives in. If a basic
/// block is in no loop (for example the entry node), null is returned.
///
inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
return LI.getLoopFor(BB);
}
/// operator[] - same as getLoopFor...
///
inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
return LI.getLoopFor(BB);
}
/// getLoopDepth - Return the loop nesting level of the specified block...
///
inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
return LI.getLoopDepth(BB);
}
// isLoopHeader - True if the block is a loop header node
inline bool isLoopHeader(const MachineBasicBlock *BB) const {
return LI.isLoopHeader(BB);
}
/// runOnFunction - Calculate the natural loop information.
///
bool runOnMachineFunction(MachineFunction &F) override;
void releaseMemory() override { LI.releaseMemory(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// removeLoop - This removes the specified top-level loop from this loop info
/// object. The loop is not deleted, as it will presumably be inserted into
/// another loop.
inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }
/// changeLoopFor - Change the top-level loop that contains BB to the
/// specified loop. This should be used by transformations that restructure
/// the loop hierarchy tree.
inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
LI.changeLoopFor(BB, L);
}
/// changeTopLevelLoop - Replace the specified loop in the top-level loops
/// list with the indicated loop.
inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
LI.changeTopLevelLoop(OldLoop, NewLoop);
}
/// addTopLevelLoop - This adds the specified loop to the collection of
/// top-level loops.
inline void addTopLevelLoop(MachineLoop *New) {
LI.addTopLevelLoop(New);
}
/// removeBlock - This method completely removes BB from all data structures,
/// including all of the Loop objects it is nested in and our mapping from
/// MachineBasicBlocks to loops.
void removeBlock(MachineBasicBlock *BB) {
LI.removeBlock(BB);
}
};
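// Example usage (an illustrative sketch) from inside a machine function pass
// that requires MachineLoopInfo in its getAnalysisUsage():
//
//   MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
//   for (MachineBasicBlock &MBB : MF) {
//     unsigned Depth = MLI.getLoopDepth(&MBB); // 0 if MBB is in no loop.
//     if (MLI.isLoopHeader(&MBB))
//       ; // MBB is the header of its innermost loop.
//   }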
// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const MachineLoop*> {
typedef const MachineLoop NodeType;
typedef MachineLoopInfo::iterator ChildIteratorType;
static NodeType *getEntryNode(const MachineLoop *L) { return L; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->end();
}
};
template <> struct GraphTraits<MachineLoop*> {
typedef MachineLoop NodeType;
typedef MachineLoopInfo::iterator ChildIteratorType;
static NodeType *getEntryNode(MachineLoop *L) { return L; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->end();
}
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachinePassRegistry.h | //===-- llvm/CodeGen/MachinePassRegistry.h ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the mechanics for machine function pass registries. A
// function pass registry (MachinePassRegistry) is auto-filled by the static
// constructors of MachinePassRegistryNode. Furthermore, there is a command line
// parser (RegisterPassParser) which listens to each registry for additions
// and deletions, so that the appropriate command option is updated.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
namespace llvm {
typedef void *(*MachinePassCtor)();
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryListener - Listener for additions and removals of nodes
/// in the registration list.
///
//===----------------------------------------------------------------------===//
class MachinePassRegistryListener {
virtual void anchor();
public:
MachinePassRegistryListener() {}
virtual ~MachinePassRegistryListener() {}
virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0;
virtual void NotifyRemove(const char *N) = 0;
};
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryNode - Machine pass node stored in registration list.
///
//===----------------------------------------------------------------------===//
class MachinePassRegistryNode {
private:
MachinePassRegistryNode *Next; // Next function pass in list.
const char *Name; // Name of function pass.
const char *Description; // Description string.
MachinePassCtor Ctor; // Function pass creator.
public:
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
: Next(nullptr)
, Name(N)
, Description(D)
, Ctor(C)
{}
// Accessors
MachinePassRegistryNode *getNext() const { return Next; }
MachinePassRegistryNode **getNextAddress() { return &Next; }
const char *getName() const { return Name; }
const char *getDescription() const { return Description; }
MachinePassCtor getCtor() const { return Ctor; }
void setNext(MachinePassRegistryNode *N) { Next = N; }
};
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistry - Track the registration of machine passes.
///
//===----------------------------------------------------------------------===//
class MachinePassRegistry {
private:
MachinePassRegistryNode *List; // List of registry nodes.
MachinePassCtor Default; // Default function pass creator.
MachinePassRegistryListener* Listener;// Listener for list adds and removes.
public:
// NO CONSTRUCTOR - we don't want static constructor ordering to mess
// with the registry.
// Accessors.
//
MachinePassRegistryNode *getList() { return List; }
MachinePassCtor getDefault() { return Default; }
void setDefault(MachinePassCtor C) { Default = C; }
void setDefault(StringRef Name);
void setListener(MachinePassRegistryListener *L) { Listener = L; }
/// Add - Adds a function pass to the registration list.
///
void Add(MachinePassRegistryNode *Node);
/// Remove - Removes a function pass from the registration list.
///
void Remove(MachinePassRegistryNode *Node);
};
//===----------------------------------------------------------------------===//
///
/// RegisterPassParser class - Handle the addition of new machine passes.
///
//===----------------------------------------------------------------------===//
template<class RegistryClass>
class RegisterPassParser : public MachinePassRegistryListener,
public cl::parser<typename RegistryClass::FunctionPassCtor> {
public:
RegisterPassParser(cl::Option &O)
: cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
~RegisterPassParser() override { RegistryClass::setListener(nullptr); }
void initialize() {
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize();
// Add existing passes to option.
for (RegistryClass *Node = RegistryClass::getList();
Node; Node = Node->getNext()) {
this->addLiteralOption(Node->getName(),
(typename RegistryClass::FunctionPassCtor)Node->getCtor(),
Node->getDescription());
}
// Make sure we listen for list changes.
RegistryClass::setListener(this);
}
// Implement the MachinePassRegistryListener callbacks.
//
void NotifyAdd(const char *N, MachinePassCtor C, const char *D) override {
this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
}
void NotifyRemove(const char *N) override {
this->removeLiteralOption(N);
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/EdgeBundles.h | //===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
// edges leaving a machine basic block are in the same bundle, and all edges
// entering a machine basic block are in the same bundle.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
#define LLVM_CODEGEN_EDGEBUNDLES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
class EdgeBundles : public MachineFunctionPass {
const MachineFunction *MF;
/// EC - Each edge bundle is an equivalence class. The keys are:
/// 2*BB->getNumber() -> Ingoing bundle.
/// 2*BB->getNumber()+1 -> Outgoing bundle.
IntEqClasses EC;
/// Blocks - Map each bundle to a list of basic block numbers.
SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
public:
static char ID;
EdgeBundles() : MachineFunctionPass(ID) {}
/// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
/// bundle number for basic block #N
unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
/// getNumBundles - Return the total number of bundles in the CFG.
unsigned getNumBundles() const { return EC.getNumClasses(); }
/// getBlocks - Return an array of blocks that are connected to Bundle.
ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
/// getMachineFunction - Return the last machine function computed.
const MachineFunction *getMachineFunction() const { return MF; }
/// view - Visualize the annotated bipartite CFG with Graphviz.
void view() const;
private:
bool runOnMachineFunction(MachineFunction&) override;
void getAnalysisUsage(AnalysisUsage&) const override;
};
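// Example (an illustrative sketch): query the bundles around a basic block,
// assuming the calling pass requires EdgeBundles in its getAnalysisUsage():
//
//   EdgeBundles &EB = getAnalysis<EdgeBundles>();
//   unsigned Ins  = EB.getBundle(MBB->getNumber(), /*Out=*/false);
//   unsigned Outs = EB.getBundle(MBB->getNumber(), /*Out=*/true);
//   // If Ins == Outs, MBB's incoming and outgoing edges share one bundle.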
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LinkAllAsmWriterComponents.h | //===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all assembler writer related passes for tools like
// llc that need this functionality.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
#include "llvm/CodeGen/GCs.h"
#include <cstdlib>
namespace {
struct ForceAsmWriterLinking {
ForceAsmWriterLinking() {
// We must reference the plug-ins in such a way that compilers will not
// delete them as dead code, even with whole program optimization,
// yet the reference is effectively a NO-OP. As the compiler isn't smart
// enough to know that getenv() never returns -1, this will do the job.
if (std::getenv("bar") != (char*) -1)
return;
llvm::linkOcamlGCPrinter();
llvm::linkErlangGCPrinter();
}
} ForceAsmWriterLinking; // Force link by creating a global definition.
}
#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineRegionInfo.h | //===- llvm/CodeGen/MachineRegionInfo.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
#define LLVM_CODEGEN_MACHINEREGIONINFO_H
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
namespace llvm {
class MachineDominatorTree;
struct MachinePostDominatorTree;
class MachineRegion;
class MachineRegionNode;
class MachineRegionInfo;
template<>
struct RegionTraits<MachineFunction> {
typedef MachineFunction FuncT;
typedef MachineBasicBlock BlockT;
typedef MachineRegion RegionT;
typedef MachineRegionNode RegionNodeT;
typedef MachineRegionInfo RegionInfoT;
typedef MachineDominatorTree DomTreeT;
typedef MachineDomTreeNode DomTreeNodeT;
typedef MachinePostDominatorTree PostDomTreeT;
typedef MachineDominanceFrontier DomFrontierT;
typedef MachineInstr InstT;
typedef MachineLoop LoopT;
typedef MachineLoopInfo LoopInfoT;
static unsigned getNumSuccessors(MachineBasicBlock *BB) {
return BB->succ_size();
}
};
class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
public:
inline MachineRegionNode(MachineRegion *Parent,
MachineBasicBlock *Entry,
bool isSubRegion = false)
: RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry, isSubRegion) {
}
bool operator==(const MachineRegion &RN) const {
return this == reinterpret_cast<const MachineRegionNode*>(&RN);
}
};
class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
public:
MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
MachineRegionInfo* RI,
MachineDominatorTree *DT, MachineRegion *Parent = nullptr);
~MachineRegion();
bool operator==(const MachineRegionNode &RN) const {
return &RN == reinterpret_cast<const MachineRegionNode*>(this);
}
};
class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
public:
explicit MachineRegionInfo();
~MachineRegionInfo() override;
// updateStatistics - Update statistics about created regions.
void updateStatistics(MachineRegion *R) final;
void recalculate(MachineFunction &F,
MachineDominatorTree *DT,
MachinePostDominatorTree *PDT,
MachineDominanceFrontier *DF);
};
class MachineRegionInfoPass : public MachineFunctionPass {
MachineRegionInfo RI;
public:
static char ID;
explicit MachineRegionInfoPass();
~MachineRegionInfoPass() override;
MachineRegionInfo &getRegionInfo() {
return RI;
}
const MachineRegionInfo &getRegionInfo() const {
return RI;
}
/// @name MachineFunctionPass interface
//@{
bool runOnMachineFunction(MachineFunction &F) override;
void releaseMemory() override;
void verifyAnalysis() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(raw_ostream &OS, const Module *) const override;
void dump() const;
//@}
};
template <>
template <>
inline MachineBasicBlock* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>() const {
assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
return getEntry();
}
template<>
template<>
inline MachineRegion* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>() const {
assert(isSubRegion() && "This is not a subregion RegionNode!");
auto Unconst = const_cast<RegionNodeBase<RegionTraits<MachineFunction>>*>(this);
return reinterpret_cast<MachineRegion*>(Unconst);
}
RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock, MachineRegion);
RegionGraphTraits(MachineRegion, MachineRegionNode);
RegionGraphTraits(const MachineRegion, const MachineRegionNode);
template <> struct GraphTraits<MachineRegionInfo*>
: public GraphTraits<FlatIt<MachineRegionNode*> > {
typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
static NodeType *getEntryNode(MachineRegionInfo *RI) {
return GraphTraits<FlatIt<MachineRegion*> >::getEntryNode(RI->getTopLevelRegion());
}
static nodes_iterator nodes_begin(MachineRegionInfo* RI) {
return nodes_iterator::begin(getEntryNode(RI));
}
static nodes_iterator nodes_end(MachineRegionInfo *RI) {
return nodes_iterator::end(getEntryNode(RI));
}
};
template <> struct GraphTraits<MachineRegionInfoPass*>
: public GraphTraits<MachineRegionInfo *> {
typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
static NodeType *getEntryNode(MachineRegionInfoPass *RI) {
return GraphTraits<MachineRegionInfo*>::getEntryNode(&RI->getRegionInfo());
}
static nodes_iterator nodes_begin(MachineRegionInfoPass* RI) {
return GraphTraits<MachineRegionInfo*>::nodes_begin(&RI->getRegionInfo());
}
static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
return GraphTraits<MachineRegionInfo*>::nodes_end(&RI->getRegionInfo());
}
};
extern template class RegionBase<RegionTraits<MachineFunction>>;
extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/GCs.h | //===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains hack functions to force linking in the GC components.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GCS_H
#define LLVM_CODEGEN_GCS_H
namespace llvm {
class GCStrategy;
class GCMetadataPrinter;
/// FIXME: Collector instances are not useful on their own. These no longer
/// serve any purpose except to link in the plugins.
/// Creates a CoreCLR-compatible garbage collector.
void linkCoreCLRGC();
/// Creates an ocaml-compatible garbage collector.
void linkOcamlGC();
/// Creates an ocaml-compatible metadata printer.
void linkOcamlGCPrinter();
/// Creates an erlang-compatible garbage collector.
void linkErlangGC();
/// Creates an erlang-compatible metadata printer.
void linkErlangGCPrinter();
/// Creates a shadow stack garbage collector. This collector requires no code
/// generator support.
void linkShadowStackGC();
void linkStatepointExampleGC();
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RegAllocRegistry.h | //===-- llvm/CodeGen/RegAllocRegistry.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for register allocator function
// pass registry (RegisterRegAlloc).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
#define LLVM_CODEGEN_REGALLOCREGISTRY_H
#include "llvm/CodeGen/MachinePassRegistry.h"
namespace llvm {
//===----------------------------------------------------------------------===//
///
/// RegisterRegAlloc class - Track the registration of register allocators.
///
//===----------------------------------------------------------------------===//
class RegisterRegAlloc : public MachinePassRegistryNode {
public:
typedef FunctionPass *(*FunctionPassCtor)();
static MachinePassRegistry Registry;
RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
: MachinePassRegistryNode(N, D, (MachinePassCtor)C)
{
Registry.Add(this);
}
~RegisterRegAlloc() { Registry.Remove(this); }
// Accessors.
//
RegisterRegAlloc *getNext() const {
return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
}
static RegisterRegAlloc *getList() {
return (RegisterRegAlloc *)Registry.getList();
}
static FunctionPassCtor getDefault() {
return (FunctionPassCtor)Registry.getDefault();
}
static void setDefault(FunctionPassCtor C) {
Registry.setDefault((MachinePassCtor)C);
}
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}
};
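// Example (an illustrative sketch; the names are hypothetical): registering
// an allocator so it can be selected on the command line, e.g. -regalloc=myra.
//
//   static FunctionPass *createMyRegAlloc() { return new MyRegAllocPass(); }
//   static RegisterRegAlloc
//     MyRegAllocRegistration("myra", "my experimental register allocator",
//                            createMyRegAlloc);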
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LexicalScopes.h | //===- LexicalScopes.cpp - Collecting lexical scope info -*- C++ -*--------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements LexicalScopes analysis.
//
// This pass collects lexical scope information and maps machine instructions
// to respective lexical scopes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
#define LLVM_CODEGEN_LEXICALSCOPES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/ValueHandle.h"
#include <unordered_map>
#include <utility>
namespace llvm {
class MachineInstr;
class MachineBasicBlock;
class MachineFunction;
//===----------------------------------------------------------------------===//
/// InsnRange - This is used to track a range of instructions with identical
/// lexical scope.
///
typedef std::pair<const MachineInstr *, const MachineInstr *> InsnRange;
//===----------------------------------------------------------------------===//
/// LexicalScope - This class is used to track scope information.
///
class LexicalScope {
public:
LexicalScope(LexicalScope *P, const DILocalScope *D, const DILocation *I,
bool A)
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
LastInsn(nullptr), FirstInsn(nullptr), DFSIn(0), DFSOut(0) {
assert((!D || D->isResolved()) && "Expected resolved node");
assert((!I || I->isResolved()) && "Expected resolved node");
if (Parent)
Parent->addChild(this);
}
// Accessors.
LexicalScope *getParent() const { return Parent; }
const MDNode *getDesc() const { return Desc; }
const DILocation *getInlinedAt() const { return InlinedAtLocation; }
const DILocalScope *getScopeNode() const { return Desc; }
bool isAbstractScope() const { return AbstractScope; }
SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
/// addChild - Add a child scope.
void addChild(LexicalScope *S) { Children.push_back(S); }
/// openInsnRange - This scope covers instruction range starting from MI.
void openInsnRange(const MachineInstr *MI) {
if (!FirstInsn)
FirstInsn = MI;
if (Parent)
Parent->openInsnRange(MI);
}
/// extendInsnRange - Extend the current instruction range covered by
/// this scope.
void extendInsnRange(const MachineInstr *MI) {
assert(FirstInsn && "MI Range is not open!");
LastInsn = MI;
if (Parent)
Parent->extendInsnRange(MI);
}
/// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
/// until now. This is used when a new scope is encountered while walking
/// machine instructions.
void closeInsnRange(LexicalScope *NewScope = nullptr) {
assert(LastInsn && "Last insn missing!");
Ranges.push_back(InsnRange(FirstInsn, LastInsn));
FirstInsn = nullptr;
LastInsn = nullptr;
// If Parent dominates NewScope then do not close Parent's instruction
// range.
if (Parent && (!NewScope || !Parent->dominates(NewScope)))
Parent->closeInsnRange(NewScope);
}
/// dominates - Return true if current scope dominates given lexical scope.
bool dominates(const LexicalScope *S) const {
if (S == this)
return true;
if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
return true;
return false;
}
// Depth First Search support to walk and manipulate LexicalScope hierarchy.
unsigned getDFSOut() const { return DFSOut; }
void setDFSOut(unsigned O) { DFSOut = O; }
unsigned getDFSIn() const { return DFSIn; }
void setDFSIn(unsigned I) { DFSIn = I; }
/// dump - print lexical scope.
void dump(unsigned Indent = 0) const;
private:
LexicalScope *Parent; // Parent to this scope.
const DILocalScope *Desc; // Debug info descriptor.
const DILocation *InlinedAtLocation; // Location at which this
// scope is inlined.
bool AbstractScope; // Abstract Scope
SmallVector<LexicalScope *, 4> Children; // Scopes defined in scope.
// Contents not owned.
SmallVector<InsnRange, 4> Ranges;
const MachineInstr *LastInsn; // Last instruction of this scope.
const MachineInstr *FirstInsn; // First instruction of this scope.
unsigned DFSIn, DFSOut; // In & Out Depth use to determine
// scope nesting.
};
//===----------------------------------------------------------------------===//
/// LexicalScopes - This class provides an interface to collect and use
/// lexical scoping information from machine instructions.
///
class LexicalScopes {
public:
LexicalScopes() : MF(nullptr), CurrentFnLexicalScope(nullptr) {}
/// initialize - Scan the machine function and construct the lexical scope
/// nest, resetting the instance if necessary.
void initialize(const MachineFunction &);
/// releaseMemory - release memory.
void reset();
/// empty - Return true if there is any lexical scope information available.
bool empty() { return CurrentFnLexicalScope == nullptr; }
/// getCurrentFunctionScope - Return lexical scope for the current function.
LexicalScope *getCurrentFunctionScope() const {
return CurrentFnLexicalScope;
}
/// getMachineBasicBlocks - Populate given set using machine basic blocks
/// which have machine instructions that belong to lexical scope identified by
/// DebugLoc.
void getMachineBasicBlocks(const DILocation *DL,
SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);
/// dominates - Return true if DebugLoc's lexical scope dominates at least one
/// machine instruction's lexical scope in a given machine basic block.
bool dominates(const DILocation *DL, MachineBasicBlock *MBB);
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
/// given DebugLoc. Return NULL if not found.
LexicalScope *findLexicalScope(const DILocation *DL);
/// getAbstractScopesList - Return a reference to list of abstract scopes.
ArrayRef<LexicalScope *> getAbstractScopesList() const {
return AbstractScopesList;
}
/// findAbstractScope - Find an abstract scope or return null.
LexicalScope *findAbstractScope(const DILocalScope *N) {
auto I = AbstractScopeMap.find(N);
return I != AbstractScopeMap.end() ? &I->second : nullptr;
}
/// findInlinedScope - Find an inlined scope for the given scope/inlined-at.
LexicalScope *findInlinedScope(const DILocalScope *N, const DILocation *IA) {
auto I = InlinedLexicalScopeMap.find(std::make_pair(N, IA));
return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
}
/// findLexicalScope - Find regular lexical scope or return null.
LexicalScope *findLexicalScope(const DILocalScope *N) {
auto I = LexicalScopeMap.find(N);
return I != LexicalScopeMap.end() ? &I->second : nullptr;
}
/// dump - Print data structures to dbgs().
void dump();
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);
private:
/// getOrCreateLexicalScope - Find lexical scope for the given Scope/IA. If
/// not available then create new lexical scope.
LexicalScope *getOrCreateLexicalScope(const DILocalScope *Scope,
const DILocation *IA = nullptr);
LexicalScope *getOrCreateLexicalScope(const DILocation *DL) {
return DL ? getOrCreateLexicalScope(DL->getScope(), DL->getInlinedAt())
: nullptr;
}
/// getOrCreateRegularScope - Find or create a regular lexical scope.
LexicalScope *getOrCreateRegularScope(const DILocalScope *Scope);
/// getOrCreateInlinedScope - Find or create an inlined lexical scope.
LexicalScope *getOrCreateInlinedScope(const DILocalScope *Scope,
const DILocation *InlinedAt);
/// extractLexicalScopes - Extract instruction ranges for each lexical scopes
/// for the given machine function.
void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
DenseMap<const MachineInstr *, LexicalScope *> &M);
void constructScopeNest(LexicalScope *Scope);
void
assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
DenseMap<const MachineInstr *, LexicalScope *> &M);
private:
const MachineFunction *MF;
/// LexicalScopeMap - Tracks the scopes in the current function.
// Use an unordered_map to ensure value pointer validity over insertion.
std::unordered_map<const DILocalScope *, LexicalScope> LexicalScopeMap;
/// InlinedLexicalScopeMap - Tracks inlined function scopes in current
/// function.
std::unordered_map<std::pair<const DILocalScope *, const DILocation *>,
LexicalScope,
pair_hash<const DILocalScope *, const DILocation *>>
InlinedLexicalScopeMap;
/// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
// Use an unordered_map to ensure value pointer validity over insertion.
std::unordered_map<const DILocalScope *, LexicalScope> AbstractScopeMap;
/// AbstractScopesList - Tracks abstract scopes constructed while processing
/// a function.
SmallVector<LexicalScope *, 4> AbstractScopesList;
/// CurrentFnLexicalScope - Top level scope for the current function.
///
LexicalScope *CurrentFnLexicalScope;
};
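// A minimal usage sketch (illustrative only; MF and DL stand for a
// MachineFunction and a DILocation assumed to be supplied by the caller):
//
//   LexicalScopes LS;
//   LS.initialize(MF);                        // build the scope nest for MF
//   if (!LS.empty())
//     if (LexicalScope *Scope = LS.findLexicalScope(DL))
//       ; // Scope is the regular or inlined scope covering DL.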
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/ScheduleHazardRecognizer.h | //=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleHazardRecognizer class, which implements
// hazard-avoidance heuristics for scheduling.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
namespace llvm {
class SUnit;
/// HazardRecognizer - This determines whether or not an instruction can be
/// issued this cycle, and whether or not a noop needs to be inserted to handle
/// the hazard.
class ScheduleHazardRecognizer {
protected:
/// MaxLookAhead - Indicate the number of cycles in the scoreboard
/// state. Important to restore the state after backtracking. Additionally,
/// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
/// bypass virtual calls. Currently the PostRA scheduler ignores it.
unsigned MaxLookAhead;
public:
ScheduleHazardRecognizer(): MaxLookAhead(0) {}
virtual ~ScheduleHazardRecognizer();
enum HazardType {
NoHazard, // This instruction can be emitted at this cycle.
Hazard, // This instruction can't be emitted at this cycle.
NoopHazard // This instruction can't be emitted, and needs noops.
};
unsigned getMaxLookAhead() const { return MaxLookAhead; }
bool isEnabled() const { return MaxLookAhead != 0; }
/// atIssueLimit - Return true if no more instructions may be issued in this
/// cycle.
///
/// FIXME: remove this once MachineScheduler is the only client.
virtual bool atIssueLimit() const { return false; }
/// getHazardType - Return the hazard type of emitting this node. There are
/// three possible results. Either:
/// * NoHazard: it is legal to issue this instruction on this cycle.
/// * Hazard: issuing this instruction would stall the machine. If some
/// other instruction is available, issue it first.
/// * NoopHazard: issuing this instruction would break the program. If
/// some other instruction can be issued, do so, otherwise issue a noop.
virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
return NoHazard;
}
/// Reset - This callback is invoked when a new block of
/// instructions is about to be scheduled. The hazard state should be
/// set to an initialized state.
virtual void Reset() {}
/// EmitInstruction - This callback is invoked when an instruction is
/// emitted, to advance the hazard state.
virtual void EmitInstruction(SUnit *) {}
/// PreEmitNoops - This callback is invoked prior to emitting an instruction.
/// It should return the number of noops to emit prior to the provided
/// instruction.
/// Note: This is only used during PostRA scheduling. EmitNoop is not called
/// for these noops.
virtual unsigned PreEmitNoops(SUnit *) {
return 0;
}
/// ShouldPreferAnother - This callback may be invoked if getHazardType
/// returns NoHazard. If, even though there is no hazard, it would be better to
/// schedule another available instruction, this callback should return true.
virtual bool ShouldPreferAnother(SUnit *) {
return false;
}
/// AdvanceCycle - This callback is invoked whenever the next top-down
/// instruction to be scheduled cannot issue in the current cycle, either
/// because of latency or resource conflicts. This should increment the
/// internal state of the hazard recognizer so that previously "Hazard"
/// instructions will now not be hazards.
virtual void AdvanceCycle() {}
/// RecedeCycle - This callback is invoked whenever the next bottom-up
/// instruction to be scheduled cannot issue in the current cycle, either
/// because of latency or resource conflicts.
virtual void RecedeCycle() {}
/// EmitNoop - This callback is invoked when a noop was added to the
/// instruction stream.
virtual void EmitNoop() {
// Default implementation: count it as a cycle.
AdvanceCycle();
}
};
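// A sketch of how a scheduling loop might drive a recognizer (illustrative
// only; HR and SU are assumed to be supplied by the caller):
//
//   switch (HR.getHazardType(SU)) {
//   case ScheduleHazardRecognizer::NoHazard:
//     HR.EmitInstruction(SU);   // issue SU and advance the hazard state
//     break;
//   case ScheduleHazardRecognizer::Hazard:
//     HR.AdvanceCycle();        // stall; try SU again next cycle
//     break;
//   case ScheduleHazardRecognizer::NoopHazard:
//     HR.EmitNoop();            // pad with a noop before retrying
//     break;
//   }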
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/ResourcePriorityQueue.h | //===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class ResourcePriorityQueue;
/// Sorting functions for the Available queue.
struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> {
ResourcePriorityQueue *PQ;
explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
bool operator()(const SUnit* left, const SUnit* right) const;
};
class ResourcePriorityQueue : public SchedulingPriorityQueue {
/// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
/// NumNodesSolelyBlocking - This vector contains, for every node in the
/// Queue, the number of nodes that the node is the sole unscheduled
/// predecessor for. This is used as a tie-breaker heuristic for better
/// mobility.
std::vector<unsigned> NumNodesSolelyBlocking;
/// Queue - The queue.
std::vector<SUnit*> Queue;
/// RegPressure - Tracking current reg pressure per register class.
///
std::vector<unsigned> RegPressure;
/// RegLimit - Tracking the number of allocatable registers per register
/// class.
std::vector<unsigned> RegLimit;
resource_sort Picker;
const TargetRegisterInfo *TRI;
const TargetLowering *TLI;
const TargetInstrInfo *TII;
const InstrItineraryData* InstrItins;
/// ResourcesModel - Represents VLIW state.
/// Not limited to VLIW targets per se, but assumes
/// definition of DFA by a target.
std::unique_ptr<DFAPacketizer> ResourcesModel;
/// Resource model - packet/bundle model. Purely
/// internal at this time.
std::vector<SUnit*> Packet;
/// Heuristics for estimating register pressure.
unsigned ParallelLiveRanges;
signed HorizontalVerticalBalance;
public:
ResourcePriorityQueue(SelectionDAGISel *IS);
bool isBottomUp() const override { return false; }
void initNodes(std::vector<SUnit> &sunits) override;
void addNode(const SUnit *SU) override {
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
}
void updateNode(const SUnit *SU) override {}
void releaseState() override {
SUnits = nullptr;
}
unsigned getLatency(unsigned NodeNum) const {
assert(NodeNum < (*SUnits).size());
return (*SUnits)[NodeNum].getHeight();
}
unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
assert(NodeNum < NumNodesSolelyBlocking.size());
return NumNodesSolelyBlocking[NodeNum];
}
/// Single cost function reflecting benefit of scheduling SU
/// in the current cycle.
signed SUSchedulingCost (SUnit *SU);
/// InitNumRegDefsLeft - Determine the # of regs defined by this node.
///
void initNumRegDefsLeft(SUnit *SU);
void updateNumRegDefsLeft(SUnit *SU);
signed regPressureDelta(SUnit *SU, bool RawPressure = false);
signed rawRegPressureDelta (SUnit *SU, unsigned RCId);
bool empty() const override { return Queue.empty(); }
void push(SUnit *U) override;
SUnit *pop() override;
void remove(SUnit *SU) override;
/// scheduledNode - Main resource tracking point.
void scheduledNode(SUnit *Node) override;
bool isResourceAvailable(SUnit *SU);
void reserveResources(SUnit *SU);
private:
void adjustPriorityOfUnscheduledPreds(SUnit *SU);
SUnit *getSingleUnscheduledPred(SUnit *SU);
unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
};
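// A sketch of the intended scheduling pattern (illustrative only; PQ is a
// ResourcePriorityQueue assumed to be owned by the caller, and the real
// schedulers interleave more bookkeeping):
//
//   while (!PQ.empty()) {
//     SUnit *SU = PQ.pop();             // best candidate by SUSchedulingCost
//     if (PQ.isResourceAvailable(SU))
//       PQ.reserveResources(SU);        // claim DFA resources this cycle
//     PQ.scheduledNode(SU);             // update pressure/packet state
//   }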
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MIRYamlMapping.h | //===- MIRYAMLMapping.h - Describes the mapping between MIR and YAML ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The MIR serialization library is currently a work in progress. It can't
// serialize machine functions at this time.
//
// This file implements the mapping between various MIR data structures and
// their corresponding YAML representation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_MIRYAMLMAPPING_H
#define LLVM_LIB_CODEGEN_MIRYAMLMAPPING_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/YAMLTraits.h"
#include <vector>
namespace llvm {
namespace yaml {
/// A wrapper around std::string which contains a source range that's being
/// set during parsing.
struct StringValue {
std::string Value;
SMRange SourceRange;
StringValue() {}
StringValue(std::string Value) : Value(std::move(Value)) {}
bool operator==(const StringValue &Other) const {
return Value == Other.Value;
}
};
template <> struct ScalarTraits<StringValue> {
static void output(const StringValue &S, void *, llvm::raw_ostream &OS) {
OS << S.Value;
}
static StringRef input(StringRef Scalar, void *Ctx, StringValue &S) {
S.Value = Scalar.str();
if (const auto *Node =
reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
S.SourceRange = Node->getSourceRange();
return "";
}
static bool mustQuote(StringRef Scalar) { return needsQuotes(Scalar); }
};
struct FlowStringValue : StringValue {
FlowStringValue() {}
FlowStringValue(std::string Value) : StringValue(Value) {}
};
template <> struct ScalarTraits<FlowStringValue> {
static void output(const FlowStringValue &S, void *, llvm::raw_ostream &OS) {
return ScalarTraits<StringValue>::output(S, nullptr, OS);
}
static StringRef input(StringRef Scalar, void *Ctx, FlowStringValue &S) {
return ScalarTraits<StringValue>::input(Scalar, Ctx, S);
}
static bool mustQuote(StringRef Scalar) { return needsQuotes(Scalar); }
};
} // end namespace yaml
} // end namespace llvm
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::StringValue)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue)
namespace llvm {
namespace yaml {
struct VirtualRegisterDefinition {
unsigned ID;
StringValue Class;
// TODO: Serialize the virtual register hints.
};
template <> struct MappingTraits<VirtualRegisterDefinition> {
static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) {
YamlIO.mapRequired("id", Reg.ID);
YamlIO.mapRequired("class", Reg.Class);
}
static const bool flow = true;
};
struct MachineBasicBlock {
unsigned ID;
StringValue Name;
unsigned Alignment = 0;
bool IsLandingPad = false;
bool AddressTaken = false;
// TODO: Serialize the successor weights.
std::vector<FlowStringValue> Successors;
std::vector<FlowStringValue> LiveIns;
std::vector<StringValue> Instructions;
};
template <> struct MappingTraits<MachineBasicBlock> {
static void mapping(IO &YamlIO, MachineBasicBlock &MBB) {
YamlIO.mapRequired("id", MBB.ID);
YamlIO.mapOptional("name", MBB.Name,
StringValue()); // Don't print out an empty name.
YamlIO.mapOptional("alignment", MBB.Alignment);
YamlIO.mapOptional("isLandingPad", MBB.IsLandingPad);
YamlIO.mapOptional("addressTaken", MBB.AddressTaken);
YamlIO.mapOptional("successors", MBB.Successors);
YamlIO.mapOptional("liveins", MBB.LiveIns);
YamlIO.mapOptional("instructions", MBB.Instructions);
}
};
/// Serializable representation of a stack object from the MachineFrameInfo
/// class.
///
/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are
/// determined by the object's type and frame information flags.
/// Dead stack objects aren't serialized.
///
/// TODO: Determine isPreallocated flag by mapping between objects and local
/// objects (Serialize local objects).
struct MachineStackObject {
enum ObjectType { DefaultType, SpillSlot, VariableSized };
// TODO: Serialize LLVM alloca reference.
unsigned ID;
ObjectType Type = DefaultType;
int64_t Offset = 0;
uint64_t Size = 0;
unsigned Alignment = 0;
};
template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> {
static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) {
IO.enumCase(Type, "default", MachineStackObject::DefaultType);
IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot);
IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized);
}
};
template <> struct MappingTraits<MachineStackObject> {
static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) {
YamlIO.mapRequired("id", Object.ID);
YamlIO.mapOptional(
"type", Object.Type,
MachineStackObject::DefaultType); // Don't print the default type.
YamlIO.mapOptional("offset", Object.Offset);
if (Object.Type != MachineStackObject::VariableSized)
YamlIO.mapRequired("size", Object.Size);
YamlIO.mapOptional("alignment", Object.Alignment);
}
static const bool flow = true;
};
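// With the mapping above, a spill slot serializes as a flow mapping along
// the lines of (illustrative values):
//
//   { id: 0, type: spill-slot, offset: -12, size: 4, alignment: 4 }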
/// Serializable representation of the fixed stack object from the
/// MachineFrameInfo class.
struct FixedMachineStackObject {
enum ObjectType { DefaultType, SpillSlot };
unsigned ID;
ObjectType Type = DefaultType;
int64_t Offset = 0;
uint64_t Size = 0;
unsigned Alignment = 0;
bool IsImmutable = false;
bool IsAliased = false;
};
template <>
struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> {
static void enumeration(yaml::IO &IO,
FixedMachineStackObject::ObjectType &Type) {
IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType);
IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot);
}
};
template <> struct MappingTraits<FixedMachineStackObject> {
static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) {
YamlIO.mapRequired("id", Object.ID);
YamlIO.mapOptional(
"type", Object.Type,
FixedMachineStackObject::DefaultType); // Don't print the default type.
YamlIO.mapOptional("offset", Object.Offset);
YamlIO.mapOptional("size", Object.Size);
YamlIO.mapOptional("alignment", Object.Alignment);
if (Object.Type != FixedMachineStackObject::SpillSlot) {
YamlIO.mapOptional("isImmutable", Object.IsImmutable);
YamlIO.mapOptional("isAliased", Object.IsAliased);
}
}
static const bool flow = true;
};
} // end namespace yaml
} // end namespace llvm
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineBasicBlock)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject)
namespace llvm {
namespace yaml {
/// Serializable representation of MachineFrameInfo.
///
/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and
/// 'RealignOption' as they are determined by the target and LLVM function
/// attributes.
/// It also doesn't serialize attributes like 'NumFixedObject' and
/// 'HasVarSizedObjects' as they are determined by the frame objects themselves.
struct MachineFrameInfo {
bool IsFrameAddressTaken = false;
bool IsReturnAddressTaken = false;
bool HasStackMap = false;
bool HasPatchPoint = false;
uint64_t StackSize = 0;
int OffsetAdjustment = 0;
unsigned MaxAlignment = 0;
bool AdjustsStack = false;
bool HasCalls = false;
// TODO: Serialize StackProtectorIdx and FunctionContextIdx
unsigned MaxCallFrameSize = 0;
// TODO: Serialize callee saved info.
// TODO: Serialize local frame objects.
bool HasOpaqueSPAdjustment = false;
bool HasVAStart = false;
bool HasMustTailInVarArgFunc = false;
// TODO: Serialize save and restore MBB references.
};
template <> struct MappingTraits<MachineFrameInfo> {
static void mapping(IO &YamlIO, MachineFrameInfo &MFI) {
YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken);
YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken);
YamlIO.mapOptional("hasStackMap", MFI.HasStackMap);
YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint);
YamlIO.mapOptional("stackSize", MFI.StackSize);
YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment);
YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment);
YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack);
YamlIO.mapOptional("hasCalls", MFI.HasCalls);
YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize);
YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment);
YamlIO.mapOptional("hasVAStart", MFI.HasVAStart);
YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc);
}
};
struct MachineFunction {
StringRef Name;
unsigned Alignment = 0;
bool ExposesReturnsTwice = false;
bool HasInlineAsm = false;
// Register information
bool IsSSA = false;
bool TracksRegLiveness = false;
bool TracksSubRegLiveness = false;
std::vector<VirtualRegisterDefinition> VirtualRegisters;
// TODO: Serialize the various register masks.
// TODO: Serialize live in registers.
// Frame information
MachineFrameInfo FrameInfo;
std::vector<FixedMachineStackObject> FixedStackObjects;
std::vector<MachineStackObject> StackObjects;
std::vector<MachineBasicBlock> BasicBlocks;
};
template <> struct MappingTraits<MachineFunction> {
static void mapping(IO &YamlIO, MachineFunction &MF) {
YamlIO.mapRequired("name", MF.Name);
YamlIO.mapOptional("alignment", MF.Alignment);
YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice);
YamlIO.mapOptional("hasInlineAsm", MF.HasInlineAsm);
YamlIO.mapOptional("isSSA", MF.IsSSA);
YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness);
YamlIO.mapOptional("tracksSubRegLiveness", MF.TracksSubRegLiveness);
YamlIO.mapOptional("registers", MF.VirtualRegisters);
YamlIO.mapOptional("frameInfo", MF.FrameInfo);
YamlIO.mapOptional("fixedStack", MF.FixedStackObjects);
YamlIO.mapOptional("stack", MF.StackObjects);
YamlIO.mapOptional("body", MF.BasicBlocks);
}
};
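// Putting the traits together, a serialized machine function document might
// look like this (illustrative values; instruction syntax is target-specific):
//
//   name:              foo
//   isSSA:             true
//   tracksRegLiveness: true
//   body:
//     - id:   0
//       name: entry
//       instructions:
//         - 'RETQ'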
} // end namespace yaml
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineJumpTableInfo.h | //===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The MachineJumpTableInfo class keeps track of jump tables referenced by
// lowered switch instructions in the MachineFunction.
//
// Instructions reference the address of these jump tables through the use of
// MO_JumpTableIndex values. When emitting assembly or machine code, these
// virtual address references are converted to refer to the address of the
// function jump tables.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
#include <cassert>
#include <vector>
namespace llvm {
class MachineBasicBlock;
class DataLayout;
class raw_ostream;
/// MachineJumpTableEntry - One jump table in the jump table info.
///
struct MachineJumpTableEntry {
/// MBBs - The vector of basic blocks from which to create the jump table.
std::vector<MachineBasicBlock*> MBBs;
explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
: MBBs(M) {}
};
class MachineJumpTableInfo {
public:
/// JTEntryKind - This enum indicates how each entry of the jump table is
/// represented and emitted.
enum JTEntryKind {
/// EK_BlockAddress - Each entry is a plain address of block, e.g.:
/// .word LBB123
EK_BlockAddress,
/// EK_GPRel64BlockAddress - Each entry is an address of block, encoded
/// with a relocation as gp-relative, e.g.:
/// .gpdword LBB123
EK_GPRel64BlockAddress,
/// EK_GPRel32BlockAddress - Each entry is an address of block, encoded
/// with a relocation as gp-relative, e.g.:
/// .gprel32 LBB123
EK_GPRel32BlockAddress,
/// EK_LabelDifference32 - Each entry is the address of the block minus
/// the address of the jump table. This is used for PIC jump tables where
/// gprel32 is not supported. e.g.:
/// .word LBB123 - LJTI1_2
/// If the .set directive is supported, this is emitted as:
/// .set L4_5_set_123, LBB123 - LJTI1_2
/// .word L4_5_set_123
EK_LabelDifference32,
/// EK_Inline - Jump table entries are emitted inline at their point of
/// use. It is the responsibility of the target to emit the entries.
EK_Inline,
/// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
/// TargetLowering::LowerCustomJumpTableEntry hook.
EK_Custom32
};
private:
JTEntryKind EntryKind;
std::vector<MachineJumpTableEntry> JumpTables;
public:
explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
/// getEntrySize - Return the size of each entry in the jump table.
unsigned getEntrySize(const DataLayout &TD) const;
/// getEntryAlignment - Return the alignment of each entry in the jump table.
unsigned getEntryAlignment(const DataLayout &TD) const;
/// createJumpTableIndex - Create a new jump table.
///
unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
/// isEmpty - Return true if there are no jump tables.
///
bool isEmpty() const { return JumpTables.empty(); }
const std::vector<MachineJumpTableEntry> &getJumpTables() const {
return JumpTables;
}
/// RemoveJumpTable - Mark the specific index as being dead. This will
/// prevent it from being emitted.
void RemoveJumpTable(unsigned Idx) {
JumpTables[Idx].MBBs.clear();
}
/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
/// the jump tables to branch to New instead.
bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
/// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
/// the jump table to branch to New instead.
bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
MachineBasicBlock *New);
/// print - Used by the MachineFunction printer to print information about
/// jump tables. Implemented in MachineFunction.cpp
///
void print(raw_ostream &OS) const;
/// dump - Print to stderr.
///
void dump() const;
};
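// A usage sketch (illustrative only; DestBBs, Old and New are assumed to be
// supplied by the caller):
//
//   MachineJumpTableInfo JTI(MachineJumpTableInfo::EK_BlockAddress);
//   unsigned Idx = JTI.createJumpTableIndex(DestBBs);
//   JTI.ReplaceMBBInJumpTable(Idx, Old, New); // retarget one table
//   JTI.RemoveJumpTable(Idx);                 // or mark it dead entirely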
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/GCStrategy.h | //===-- llvm/CodeGen/GCStrategy.h - Garbage collection ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// GCStrategy coordinates code generation algorithms and implements some itself
// in order to generate code compatible with a target code generator as
// specified in a function's 'gc' attribute. Algorithms are enabled by setting
// flags in a subclass's constructor, and some virtual methods can be
// overridden.
//
// GCStrategy is relevant for implementations using either gc.root or
// gc.statepoint based lowering strategies, but is currently focused mostly on
// options for gc.root. This will change over time.
//
// When requested by a subclass of GCStrategy, the gc.root implementation will
// populate GCModuleInfo and GCFunctionInfo with information about each
// Function in
// the Module that opts in to garbage collection. Specifically:
//
// - Safe points
// Garbage collection is generally only possible at certain points in code.
// GCStrategy can request that the collector insert such points:
//
// - At and after any call to a subroutine
// - Before returning from the current function
// - Before backwards branches (loops)
//
// - Roots
// When a reference to a GC-allocated object exists on the stack, it must be
// stored in an alloca registered with llvm.gcroot.
//
// This information can be used to emit the metadata tables which are required
// by
// the target garbage collector runtime.
//
// When used with gc.statepoint, information about safepoint and roots can be
// found in the binary StackMap section after code generation. Safepoint
// placement is currently the responsibility of the frontend, though late
// insertion support is planned. gc.statepoint does not currently support
// custom stack map formats; such can be generated by parsing the standard
// stack map section if desired.
//
// The read and write barrier support can be used with either implementation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GCSTRATEGY_H
#define LLVM_IR_GCSTRATEGY_H
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Registry.h"
#include <string>
namespace llvm {
namespace GC {
/// PointKind - Used to indicate whether the address of the call instruction
/// or the address after the call instruction is listed in the stackmap. For
/// most runtimes, PostCall safepoints are appropriate.
///
enum PointKind {
PreCall, ///< Instr is a call instruction.
PostCall ///< Instr is the return address of a call.
};
}
/// GCStrategy describes a garbage collector algorithm's code generation
/// requirements, and provides overridable hooks for those needs which cannot
/// be abstractly described. GCStrategy objects must be looked up through
/// the Function. The objects themselves are owned by the Context and must
/// be immutable.
class GCStrategy {
private:
std::string Name;
friend class GCModuleInfo;
protected:
bool UseStatepoints; /// Uses gc.statepoints as opposed to gc.roots;
/// if set, none of the other options can be
/// anything but their default values.
unsigned NeededSafePoints; ///< Bitmask of required safe points.
bool CustomReadBarriers; ///< Default is to insert loads.
bool CustomWriteBarriers; ///< Default is to insert stores.
bool CustomRoots; ///< Default is to pass through to backend.
bool InitRoots; ///< If set, roots are nulled during lowering.
bool UsesMetadata; ///< If set, backend must emit metadata tables.
public:
GCStrategy();
virtual ~GCStrategy() {}
/// Return the name of the GC strategy. This is the value of the collector
/// name string specified on functions which use this strategy.
const std::string &getName() const { return Name; }
/// By default, write barriers are replaced with simple store
/// instructions. If true, you must provide a custom pass to lower
/// calls to @llvm.gcwrite.
bool customWriteBarrier() const { return CustomWriteBarriers; }
/// By default, read barriers are replaced with simple load
/// instructions. If true, you must provide a custom pass to lower
/// calls to @llvm.gcread.
bool customReadBarrier() const { return CustomReadBarriers; }
/// Returns true if this strategy is expecting the use of gc.statepoints,
/// and false otherwise.
bool useStatepoints() const { return UseStatepoints; }
/** @name Statepoint Specific Properties */
///@{
/// If the value specified can be reliably distinguished, returns true for
/// pointers to GC managed locations and false for pointers to non-GC
/// managed locations. Note a GCStrategy can always return 'None' (i.e. an
/// empty optional) indicating it can't reliably distinguish.
virtual Optional<bool> isGCManagedPointer(const Value *V) const {
return None;
}
///@}
/** @name GCRoot Specific Properties
* These properties and overrides only apply to collector strategies using
* GCRoot.
*/
///@{
/// True if safe points of any kind are required. By default, none are
/// recorded.
bool needsSafePoints() const { return NeededSafePoints != 0; }
/// True if the given kind of safe point is required. By default, none are
/// recorded.
bool needsSafePoint(GC::PointKind Kind) const {
return (NeededSafePoints & 1 << Kind) != 0;
}
/// By default, roots are left for the code generator so it can generate a
/// stack map. If true, you must provide a custom pass to lower
/// calls to @llvm.gcroot.
bool customRoots() const { return CustomRoots; }
/// If set, gcroot intrinsics should initialize their allocas to null
/// before the first use. This is necessary for most GCs and is enabled by
/// default.
bool initializeRoots() const { return InitRoots; }
/// If set, appropriate metadata tables must be emitted by the back-end
/// (assembler, JIT, or otherwise). For statepoint, this method is
/// currently unsupported. The stackmap information can be found in the
/// StackMap section as described in the documentation.
bool usesMetadata() const { return UsesMetadata; }
///@}
};
/// Subclasses of GCStrategy are made available for use during compilation by
/// adding them to the global GCRegistry. This can be done either within the
/// LLVM source tree or via a loadable plugin. An example registration
/// would be:
/// static GCRegistry::Add<CustomGC> X("custom-name",
/// "my custom supper fancy gc strategy");
///
/// Note that to use a custom GCMetadataPrinter w/gc.roots, you must also
/// register your GCMetadataPrinter subclass with the
/// GCMetadataPrinterRegistry as well.
typedef Registry<GCStrategy> GCRegistry;
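// A sketch of a subclass that opts in to gc.root lowering with post-call
// safe points (illustrative only; the flags are the protected members of
// GCStrategy shown above):
//
//   class MyGC : public GCStrategy {
//   public:
//     MyGC() {
//       InitRoots = true;                     // null roots during lowering
//       NeededSafePoints = 1 << GC::PostCall; // record return addresses
//       UsesMetadata = true;                  // ask the backend for tables
//     }
//   };
//   static GCRegistry::Add<MyGC> X("my-gc", "example collector strategy");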
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/ISDOpcodes.h | //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares codegen opcodes and related utilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_ISDOPCODES_H
#define LLVM_CODEGEN_ISDOPCODES_H
namespace llvm {
/// ISD namespace - This namespace contains an enum which represents all of the
/// SelectionDAG node types and value types.
///
namespace ISD {
//===--------------------------------------------------------------------===//
/// ISD::NodeType enum - This enum defines the target-independent operators
/// for a SelectionDAG.
///
/// Targets may also define target-dependent operator codes for SDNodes. For
/// example, on x86, these are the enum values in the X86ISD namespace.
/// Targets should aim to use target-independent operators to model their
/// instruction sets as much as possible, and only use target-dependent
/// operators when they have special requirements.
///
/// Finally, during and after selection proper, SDNodes may use special
/// operator codes that correspond directly with MachineInstr opcodes. These
/// are used to represent selected instructions. See the isMachineOpcode()
/// and getMachineOpcode() member functions of SDNode.
///
enum NodeType {
/// DELETED_NODE - This is an illegal value that is used to catch
/// errors. This opcode is not a legal opcode for any node.
DELETED_NODE,
/// EntryToken - This is the marker used to indicate the start of a region.
EntryToken,
/// TokenFactor - This node takes multiple tokens as input and produces a
/// single token result. This is used to represent the fact that the operand
/// operators are independent of each other.
TokenFactor,
/// AssertSext, AssertZext - These nodes record if a register contains a
/// value that has already been zero or sign extended from a narrower type.
/// These nodes take two operands. The first is the node that has already
/// been extended, and the second is a value type node indicating the width
/// of the extension.
AssertSext, AssertZext,
/// Various leaf nodes.
BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
Constant, ConstantFP,
GlobalAddress, GlobalTLSAddress, FrameIndex,
JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
/// The address of the GOT
GLOBAL_OFFSET_TABLE,
/// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
/// llvm.returnaddress on the DAG. These nodes take one operand, the index
/// of the frame or return address to return. An index of zero corresponds
/// to the current function's frame or return address, an index of one to
/// the parent's frame or return address, and so on.
FRAMEADDR, RETURNADDR,
/// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
/// Materializes the offset from the local object pointer of another
/// function to a particular local object passed to llvm.localescape. The
/// operand is the MCSymbol label used to represent this offset, since
/// typically the offset is not known until after code generation of the
/// parent.
LOCAL_RECOVER,
/// READ_REGISTER, WRITE_REGISTER - These nodes represent the
/// llvm.read_register and llvm.write_register intrinsics on the DAG, which
/// implement the named register global variables extension.
READ_REGISTER,
WRITE_REGISTER,
/// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
/// first (possible) on-stack argument. This is needed for correct stack
/// adjustment during unwind.
FRAME_TO_ARGS_OFFSET,
/// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
/// the 'eh_return' GCC DWARF builtin, which is used to return from an
/// exception. The general meaning is: adjust stack by OFFSET and pass
/// execution to HANDLER. Many platform-related details also :)
EH_RETURN,
/// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
/// This corresponds to the eh.sjlj.setjmp intrinsic.
/// It takes an input chain and a pointer to the jump buffer as inputs
/// and returns an outchain.
EH_SJLJ_SETJMP,
/// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
/// This corresponds to the eh.sjlj.longjmp intrinsic.
/// It takes an input chain and a pointer to the jump buffer as inputs
/// and returns an outchain.
EH_SJLJ_LONGJMP,
/// TargetConstant* - Like Constant*, but the DAG does not do any folding,
/// simplification, or lowering of the constant. They are used for constants
/// which are known to fit in the immediate fields of their users, or for
/// carrying magic numbers which are not values which need to be
/// materialized in registers.
TargetConstant,
TargetConstantFP,
/// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
/// anything else with this node, and this is valid in the target-specific
/// dag, turning into a GlobalAddress operand.
TargetGlobalAddress,
TargetGlobalTLSAddress,
TargetFrameIndex,
TargetJumpTable,
TargetConstantPool,
TargetExternalSymbol,
TargetBlockAddress,
MCSymbol,
/// TargetIndex - Like a constant pool entry, but with completely
/// target-dependent semantics. Holds target flags, a 32-bit index, and a
/// 64-bit index. Targets can use this however they like.
TargetIndex,
/// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
/// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
/// node returns the result of the intrinsic.
INTRINSIC_WO_CHAIN,
/// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
/// This node represents a target intrinsic function with side effects that
/// returns a result. The first operand is a chain pointer. The second is
/// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
/// operands to the intrinsic follow. The node has two results, the result
/// of the intrinsic and an output chain.
INTRINSIC_W_CHAIN,
/// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
/// This node represents a target intrinsic function with side effects that
/// does not return a result. The first operand is a chain pointer. The
/// second is the ID number of the intrinsic from the llvm::Intrinsic
/// namespace. The operands to the intrinsic follow.
INTRINSIC_VOID,
/// CopyToReg - This node has three operands: a chain, a register number to
/// set to this value, and a value.
CopyToReg,
/// CopyFromReg - This node indicates that the input value is a virtual or
/// physical register that is defined outside of the scope of this
/// SelectionDAG. The register is available from the RegisterSDNode object.
CopyFromReg,
/// UNDEF - An undefined node.
UNDEF,
/// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
/// a Constant, which is required to be operand #1) half of the integer or
/// float value specified as operand #0. This is only for use before
/// legalization, for values that will be broken into multiple registers.
EXTRACT_ELEMENT,
/// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
/// Given two values of the same integer value type, this produces a value
/// twice as big. Like EXTRACT_ELEMENT, this can only be used before
/// legalization.
BUILD_PAIR,
/// MERGE_VALUES - This node takes multiple discrete operands and returns
/// them all as its individual results. This node has exactly the same
/// number of inputs and outputs. This node is useful for some pieces of the
/// code generator that want to think about a single node with multiple
/// results, not multiple nodes.
MERGE_VALUES,
/// Simple integer binary arithmetic operators.
ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
/// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
/// a signed/unsigned value of type i[2*N], and return the full value as
/// two results, each of type iN.
SMUL_LOHI, UMUL_LOHI,
/// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
/// remainder result.
SDIVREM, UDIVREM,
/// CARRY_FALSE - This node is used when folding other nodes,
/// like ADDC/SUBC, which indicate the carry result is always false.
CARRY_FALSE,
/// Carry-setting nodes for multiple precision addition and subtraction.
/// These nodes take two operands of the same value type, and produce two
/// results. The first result is the normal add or sub result, the second
/// result is the carry flag result.
ADDC, SUBC,
/// Carry-using nodes for multiple precision addition and subtraction. These
/// nodes take three operands: The first two are the normal lhs and rhs to
/// the add or sub, and the third is the input carry flag. These nodes
/// produce two results; the normal result of the add or sub, and the output
/// carry flag. These nodes both read and write a carry flag to allow them
/// to them to be chained together for add and sub of arbitrarily large
/// values.
ADDE, SUBE,
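// For example, an i64 add on a 32-bit target is typically expanded into a
// carry-setting add of the low halves chained into a carry-using add of the
// high halves:
//   Lo, Carry = ADDC LHS.lo, RHS.lo
//   Hi        = ADDE LHS.hi, RHS.hi, Carry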
/// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
/// These nodes take two operands: the normal LHS and RHS to the add. They
/// produce two results: the normal result of the add, and a boolean that
/// indicates if an overflow occurred (*not* a flag, because it may be stored
/// to memory, etc.). If the type of the boolean is not i1 then the high
/// bits conform to getBooleanContents.
/// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
SADDO, UADDO,
/// Same for subtraction.
SSUBO, USUBO,
/// Same for multiplication.
SMULO, UMULO,
/// Simple binary floating point operators.
FADD, FSUB, FMUL, FDIV, FREM,
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,
/// FMAD - Perform a * b + c, while getting the same result as the
/// separately rounded operations.
FMAD,
/// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
/// DAG node does not require that X and Y have the same type, just that
/// they are both floating point. X and the result must have the same type.
/// FCOPYSIGN(f32, f64) is allowed.
FCOPYSIGN,
/// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
/// value as an integer 0/1 value.
FGETSIGN,
/// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
/// specified, possibly variable, elements. The number of elements is
/// required to be a power of two. The types of the operands must all be
/// the same and must match the vector element type, except that integer
/// types are allowed to be larger than the element type, in which case
/// the operands are implicitly truncated.
BUILD_VECTOR,
/// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
/// at IDX replaced with VAL. If the type of VAL is larger than the vector
/// element type then VAL is truncated before replacement.
INSERT_VECTOR_ELT,
/// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
/// identified by the (potentially variable) element number IDX. If the
/// return type is an integer type larger than the element type of the
/// vector, the result is extended to the width of the return type.
EXTRACT_VECTOR_ELT,
/// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
/// vector type with the same length and element type, this produces a
/// concatenated vector result value, with length equal to the sum of the
/// lengths of the input vectors.
CONCAT_VECTORS,
/// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
/// with VECTOR2 inserted into VECTOR1 at the (potentially
/// variable) element number IDX, which must be a multiple of the
/// VECTOR2 vector length. The elements of VECTOR1 starting at
/// IDX are overwritten with VECTOR2. Elements IDX through
/// IDX + vector_length(VECTOR2) - 1 must be valid VECTOR1 indices.
INSERT_SUBVECTOR,
/// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
/// vector value) starting with the element number IDX, which must be a
/// constant multiple of the result vector length.
EXTRACT_SUBVECTOR,
/// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
/// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
/// values that indicate which value (or undef) each result element will
/// get. These constant ints are accessible through the
/// ShuffleVectorSDNode class. This is quite similar to the Altivec
/// 'vperm' instruction, except that the indices must be constants and are
/// in terms of the element size of VEC1/VEC2, not in terms of bytes.
VECTOR_SHUFFLE,
/// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
/// scalar value into element 0 of the resultant vector type. The top
/// elements 1 to N-1 of the N-element vector are undefined. The type
/// of the operand must match the vector element type, except when they
/// are integer types. In this case the operand is allowed to be wider
/// than the vector element type, and is implicitly truncated to it.
SCALAR_TO_VECTOR,
/// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
/// producing an unsigned/signed value of type i[2*N], then return the top
/// part.
MULHU, MULHS,
/// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
/// integers.
SMIN, SMAX, UMIN, UMAX,
/// Bitwise operators - logical and, logical or, logical xor.
AND, OR, XOR,
/// Shift and rotation operations. After legalization, the type of the
/// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
/// the shift amount can be any type, but care must be taken to ensure it is
/// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before
/// legalization, types like i1024 can occur and i8 doesn't have enough bits
/// to represent the shift amount.
/// When the 1st operand is a vector, the shift amount must be in the same
/// type. (TLI.getShiftAmountTy() will return the same type when the input
/// type is a vector.)
SHL, SRA, SRL, ROTL, ROTR,
/// Byte Swap and Counting operators.
BSWAP, CTTZ, CTLZ, CTPOP,
/// Bit counting operators with an undefined result for zero inputs.
CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
/// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
/// i1 then the high bits must conform to getBooleanContents.
SELECT,
/// Select with a vector condition (op #0) and two vector operands (ops #1
/// and #2), returning a vector result. All vectors have the same length.
/// Much like the scalar select and setcc, each bit in the condition selects
/// whether the corresponding result element is taken from op #1 or op #2.
/// At first, the VSELECT condition is of vXi1 type. Later, targets may
/// change the condition type in order to match the VSELECT node using a
/// pattern. The condition follows the BooleanContent format of the target.
VSELECT,
/// Select with condition operator - This selects between a true value and
/// a false value (ops #2 and #3) based on the boolean result of comparing
/// the lhs and rhs (ops #0 and #1) of a conditional expression with the
/// condition code in op #4, a CondCodeSDNode.
SELECT_CC,
/// SetCC operator - This evaluates to a true value iff the condition is
/// true. If the result value type is not i1 then the high bits conform
/// to getBooleanContents. The operands to this are the left and right
/// operands to compare (ops #0, and #1) and the condition code to compare
/// them with (op #2) as a CondCodeSDNode. If the operands are vector types
/// then the result type must also be a vector type.
SETCC,
/// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
/// integer shift operations, just like ADD/SUB_PARTS. The operation
/// ordering is:
/// [Lo,Hi] = op [LoLHS,HiLHS], Amt
SHL_PARTS, SRA_PARTS, SRL_PARTS,
/// Conversion operators. These are all single input single output
/// operations. For all of these, the result type must be strictly
/// wider or narrower (depending on the operation) than the source
/// type.
/// SIGN_EXTEND - Used for integer types, replicating the sign bit
/// into new bits.
SIGN_EXTEND,
/// ZERO_EXTEND - Used for integer types, zeroing the new bits.
ZERO_EXTEND,
/// ANY_EXTEND - Used for integer types. The high bits are undefined.
ANY_EXTEND,
/// TRUNCATE - Completely drop the high bits.
TRUNCATE,
/// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
/// depends on the first letter) to floating point.
SINT_TO_FP,
UINT_TO_FP,
/// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
/// sign extend a small value in a large integer register (e.g. sign
/// extending the low 8 bits of a 32-bit register to fill the top 24 bits
/// with the 7th bit). The size of the smaller type is indicated by operand
/// #1, a ValueType node.
SIGN_EXTEND_INREG,
/// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
/// in-register any-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
/// operand type and the result type match. Each of the low operand
/// elements is any-extended into the corresponding, wider result
/// elements with the high bits becoming undef.
ANY_EXTEND_VECTOR_INREG,
/// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
/// in-register sign-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
/// operand type and the result type match. Each of the low operand
/// elements is sign-extended into the corresponding, wider result
/// elements.
// FIXME: The SIGN_EXTEND_INREG node isn't specifically limited to
// scalars, but it also doesn't handle vectors well. Either it should be
// restricted to scalars or this node (and its handling) should be merged
// into it.
SIGN_EXTEND_VECTOR_INREG,
/// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
/// in-register zero-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
/// operand type and the result type match. Each of the low operand
/// elements is zero-extended into the corresponding, wider result
/// elements.
ZERO_EXTEND_VECTOR_INREG,
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
/// integer.
FP_TO_SINT,
FP_TO_UINT,
/// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
/// down to the precision of the destination VT. TRUNC is a flag, which is
/// always an integer that is zero or one. If TRUNC is 0, this is a
/// normal rounding, if it is 1, this FP_ROUND is known to not change the
/// value of Y.
///
/// The TRUNC = 1 case is used in cases where we know that the value will
/// not be modified by the node, because Y is not using any of the extra
/// precision of source type. This allows certain transformations like
/// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
/// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
FP_ROUND,
/// FLT_ROUNDS_ - Returns current rounding mode:
/// -1 Undefined
/// 0 Round to 0
/// 1 Round to nearest
/// 2 Round to +inf
/// 3 Round to -inf
FLT_ROUNDS_,
/// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
/// rounds it to a floating point value. It then promotes it and returns it
/// in a register of the same size. This operation effectively just
/// discards excess precision. The type to round down to is specified by
/// the VT operand, a VTSDNode.
FP_ROUND_INREG,
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
FP_EXTEND,
/// BITCAST - This operator converts between integer, vector and FP
/// values, as if the value was stored to memory with one type and loaded
/// from the same address with the other type (or equivalently for vector
/// format conversions, etc). The source and result are required to have
/// the same bit size (e.g. f32 <-> i32). This can also be used for
/// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
/// getNode().
BITCAST,
/// ADDRSPACECAST - This operator converts between pointers of different
/// address spaces.
ADDRSPACECAST,
/// CONVERT_RNDSAT - This operator is used to support various conversions
/// between various types (float, signed, unsigned and vectors of those
/// types) with rounding and saturation. NOTE: Avoid using this operator as
/// most targets don't support it and the operator might be removed in the
/// future. It takes the following arguments:
/// 0) value
/// 1) dest type (type to convert to)
/// 2) src type (type to convert from)
/// 3) rounding imm
/// 4) saturation imm
/// 5) ISD::CvtCode indicating the type of conversion to do
CONVERT_RNDSAT,
/// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
/// and truncation for half-precision (16 bit) floating point numbers. These
/// nodes
/// form a semi-softened interface for dealing with f16 (as an i16), which
/// is often a storage-only type but has native conversions.
FP16_TO_FP, FP_TO_FP16,
/// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
/// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
/// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
/// floating point operations. These are inspired by libm.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
FMINNUM, FMAXNUM,
/// FSINCOS - Compute both fsin and fcos as a single operation.
FSINCOS,
/// LOAD and STORE have token chains as their first operand, then the same
/// operands as an LLVM load/store instruction, then an offset node that
/// is added / subtracted from the base pointer to form the address (for
/// indexed memory ops).
LOAD, STORE,
/// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
/// to a specified boundary. This node always has two return values: a new
/// stack pointer value and a chain. The first operand is the token chain,
/// the second is the number of bytes to allocate, and the third is the
/// alignment boundary. The size is guaranteed to be a multiple of the
/// stack alignment, and the alignment is guaranteed to be bigger than the
/// stack alignment (if required) or 0 to get standard stack alignment.
DYNAMIC_STACKALLOC,
/// Control flow instructions. These all have token chains.
/// BR - Unconditional branch. The first operand is the chain
/// operand, the second is the MBB to branch to.
BR,
/// BRIND - Indirect branch. The first operand is the chain, the second
/// is the value to branch to, which must be of the same type as the
/// target's pointer type.
BRIND,
/// BR_JT - Jumptable branch. The first operand is the chain, the second
/// is the jumptable index, the last one is the jumptable entry index.
BR_JT,
/// BRCOND - Conditional branch. The first operand is the chain, the
/// second is the condition, the third is the block to branch to if the
/// condition is true. If the type of the condition is not i1, then the
/// high bits must conform to getBooleanContents.
BRCOND,
/// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
/// that the condition is represented as condition code, and two nodes to
/// compare, rather than as a combined SetCC node. The operands in order
/// are chain, cc, lhs, rhs, block to branch to if condition is true.
BR_CC,
/// INLINEASM - Represents an inline asm block. This node always has two
/// return values: a chain and a flag result. The inputs are as follows:
/// Operand #0 : Input chain.
/// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
/// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
/// Operand #3 : HasSideEffect, IsAlignStack bits.
/// After this, it is followed by a list of operands with this format:
/// ConstantSDNode: Flags that encode whether it is a mem or not, the
/// # of operands that follow, etc. See InlineAsm.h.
/// ... however many operands ...
/// Operand #last: Optional, an incoming flag.
///
/// The variable width operands are required to represent target addressing
/// modes as a single "operand", even though they may have multiple
/// SDOperands.
INLINEASM,
/// EH_LABEL - Represents a label in mid basic block used to track
/// locations needed for debug and exception handling tables. These nodes
/// take a chain as input and return a chain.
EH_LABEL,
/// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
/// value, the same type as the pointer type for the system, and an output
/// chain.
STACKSAVE,
/// STACKRESTORE has two operands, an input chain and a pointer to restore
/// to. It returns an output chain.
STACKRESTORE,
/// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
/// of a call sequence, and carry arbitrary information that target might
/// want to know. The first operand is a chain, the rest are specified by
/// the target and not touched by the DAG optimizers.
/// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
CALLSEQ_START, // Beginning of a call sequence
CALLSEQ_END, // End of a call sequence
/// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
/// and the alignment. It returns a pair of values: the vaarg value and a
/// new chain.
VAARG,
/// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
/// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
/// source.
VACOPY,
/// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
/// pointer, and a SRCVALUE.
VAEND, VASTART,
/// SRCVALUE - This is a node type that holds a Value* that is used to
/// make reference to a value in the LLVM IR.
SRCVALUE,
/// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
/// reference metadata in the IR.
MDNODE_SDNODE,
/// PCMARKER - This corresponds to the pcmarker intrinsic.
PCMARKER,
/// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
/// The only operand is a chain; a value and a chain are produced. The
/// value is the contents of the architecture-specific cycle-counter-like
/// register (or other high-accuracy, low-latency clock source).
READCYCLECOUNTER,
/// HANDLENODE node - Used as a handle for various purposes.
HANDLENODE,
/// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
/// takes as input a token chain, the pointer to the trampoline, the pointer
/// to the nested function, the pointer to pass for the 'nest' parameter, a
/// SRCVALUE for the trampoline and another for the nested function
/// (allowing targets to access the original Function*).
/// It produces a token chain as output.
INIT_TRAMPOLINE,
/// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
/// It takes a pointer to the trampoline and produces a (possibly) new
/// pointer to the same trampoline with platform-specific adjustments
/// applied. The pointer it returns points to an executable block of code.
ADJUST_TRAMPOLINE,
/// TRAP - Trapping instruction
TRAP,
/// DEBUGTRAP - Trap intended to get the attention of a debugger.
DEBUGTRAP,
/// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
/// is the chain. The other operands are the address to prefetch,
/// read / write specifier, locality specifier and instruction / data cache
/// specifier.
PREFETCH,
/// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
/// This corresponds to the fence instruction. It takes an input chain, and
/// two integer constants: an AtomicOrdering and a SynchronizationScope.
ATOMIC_FENCE,
/// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
/// This corresponds to "load atomic" instruction.
ATOMIC_LOAD,
/// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
/// This corresponds to "store atomic" instruction.
ATOMIC_STORE,
/// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
/// For double-word atomic operations:
/// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
/// swapLo, swapHi)
/// This corresponds to the cmpxchg instruction.
ATOMIC_CMP_SWAP,
/// Val, Success, OUTCHAIN
/// = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
/// N.b. this is still a strong cmpxchg operation, so
/// Success == "Val == cmp".
ATOMIC_CMP_SWAP_WITH_SUCCESS,
/// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
/// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
/// For double-word atomic operations:
/// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
/// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
/// These correspond to the atomicrmw instruction.
ATOMIC_SWAP,
ATOMIC_LOAD_ADD,
ATOMIC_LOAD_SUB,
ATOMIC_LOAD_AND,
ATOMIC_LOAD_OR,
ATOMIC_LOAD_XOR,
ATOMIC_LOAD_NAND,
ATOMIC_LOAD_MIN,
ATOMIC_LOAD_MAX,
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
// masked-off lanes.
MLOAD, MSTORE,
// Masked gather and scatter - load and store operations for a vector of
// random addresses with additional mask operand that prevents memory
// accesses to the masked-off lanes.
MGATHER, MSCATTER,
/// This corresponds to the llvm.lifetime.* intrinsics. The first operand
/// is the chain and the second operand is the alloca pointer.
LIFETIME_START, LIFETIME_END,
/// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
/// beginning and end of GC transition sequence, and carry arbitrary
/// information that the target might need for lowering. The first operand is
/// a chain, the rest are specified by the target and not touched by the DAG
/// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
/// nested.
GC_TRANSITION_START,
GC_TRANSITION_END,
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
};
/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+300;
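// For illustration only: a backend typically continues this numbering in its
// own namespace, e.g. (all names hypothetical except the ISD:: constants):
//
//   namespace MyTargetISD {
//   enum NodeType {
//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
//     CALL,      // target-specific call pseudo-node
//     RET_FLAG,  // target-specific return pseudo-node
//     // Memory-referencing target nodes start at FIRST_TARGET_MEMORY_OPCODE
//     // so they can be created with SelectionDAG::getMemIntrinsicNode.
//     LDREX = ISD::FIRST_TARGET_MEMORY_OPCODE,
//   };
//   }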
//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
///
/// UNINDEXED "Normal" load / store. The effective address is already
/// computed and is available in the base pointer. The offset
/// operand is always undefined. In addition to producing a
/// chain, an unindexed load produces one value (result of the
/// load); an unindexed store does not produce a value.
///
/// PRE_INC Similar to the unindexed mode where the effective address is
/// PRE_DEC the value of the base pointer plus / minus the offset.
/// It considers the computation as being folded into the load /
/// store operation (i.e. the load / store does the address
/// computation as well as performing the memory transaction).
/// The base operand is always undefined. In addition to
/// producing a chain, pre-indexed load produces two values
/// (result of the load and the result of the address
/// computation); a pre-indexed store produces one value (result
/// of the address computation).
///
/// POST_INC The effective address is the value of the base pointer. The
/// POST_DEC value of the offset operand is then added to / subtracted
/// from the base after memory transaction. In addition to
/// producing a chain, post-indexed load produces two values
/// (the result of the load and the result of the base +/- offset
/// computation); a post-indexed store produces one value (the
/// result of the base +/- offset computation).
enum MemIndexedMode {
UNINDEXED = 0,
PRE_INC,
PRE_DEC,
POST_INC,
POST_DEC,
LAST_INDEXED_MODE
};
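// As a rough analogy (a sketch, not normative): a PRE_INC load acts like the
// C fragment `val = *(p += n);`, where the updated address is both used for
// the access and produced as a result, while a POST_INC load acts like
// `val = *p; p += n;`, where the access uses the old base and the updated
// address is produced alongside the loaded value.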
//===--------------------------------------------------------------------===//
/// LoadExtType enum - This enum defines the three variants of LOADEXT
/// (load with extension).
///
/// SEXTLOAD loads the integer operand and sign extends it to a larger
/// integer result type.
/// ZEXTLOAD loads the integer operand and zero extends it to a larger
/// integer result type.
/// EXTLOAD is used for two things: floating point extending loads and
/// integer extending loads [the top bits are undefined].
enum LoadExtType {
NON_EXTLOAD = 0,
EXTLOAD,
SEXTLOAD,
ZEXTLOAD,
LAST_LOADEXT_TYPE
};
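// For example, loading an i8 into an i32 register can be modeled three ways:
// SEXTLOAD replicates the sign bit into bits 8-31, ZEXTLOAD clears them, and
// EXTLOAD leaves them undefined (useful when the consumer only inspects the
// low 8 bits). Widening an f32 load to f64 uses EXTLOAD, since there is no
// sign/zero distinction for floating point.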
NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
//===--------------------------------------------------------------------===//
/// ISD::CondCode enum - These are ordered carefully to make the bitfields
/// below work out, when considering SETFALSE (something that never exists
/// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
/// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
/// to. If the "N" column is 1, the result of the comparison is undefined if
/// the input is a NAN.
///
/// All of these (except for the 'always folded ops') should be handled for
/// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
/// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
///
/// Note that these are laid out in a specific order to allow bit-twiddling
/// to transform conditions.
enum CondCode {
// Opcode N U L G E Intuitive operation
SETFALSE, // 0 0 0 0 Always false (always folded)
SETOEQ, // 0 0 0 1 True if ordered and equal
SETOGT, // 0 0 1 0 True if ordered and greater than
SETOGE, // 0 0 1 1 True if ordered and greater than or equal
SETOLT, // 0 1 0 0 True if ordered and less than
SETOLE, // 0 1 0 1 True if ordered and less than or equal
SETONE, // 0 1 1 0 True if ordered and operands are unequal
SETO, // 0 1 1 1 True if ordered (no nans)
SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
SETUEQ, // 1 0 0 1 True if unordered or equal
SETUGT, // 1 0 1 0 True if unordered or greater than
SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
SETULT, // 1 1 0 0 True if unordered or less than
SETULE, // 1 1 0 1 True if unordered, less than, or equal
SETUNE, // 1 1 1 0 True if unordered or not equal
SETTRUE, // 1 1 1 1 Always true (always folded)
// Don't care operations: undefined if the input is a nan.
SETFALSE2, // 1 X 0 0 0 Always false (always folded)
SETEQ, // 1 X 0 0 1 True if equal
SETGT, // 1 X 0 1 0 True if greater than
SETGE, // 1 X 0 1 1 True if greater than or equal
SETLT, // 1 X 1 0 0 True if less than
SETLE, // 1 X 1 0 1 True if less than or equal
SETNE, // 1 X 1 1 0 True if not equal
SETTRUE2, // 1 X 1 1 1 Always true (always folded)
SETCC_INVALID // Marker value.
};
/// isSignedIntSetCC - Return true if this is a setcc instruction that
/// performs a signed comparison when used with integer operands.
inline bool isSignedIntSetCC(CondCode Code) {
return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
}
/// isUnsignedIntSetCC - Return true if this is a setcc instruction that
/// performs an unsigned comparison when used with integer operands.
inline bool isUnsignedIntSetCC(CondCode Code) {
return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}
/// isTrueWhenEqual - Return true if the specified condition returns true if
/// the two operands to the condition are equal. Note that if one of the two
/// operands is a NaN, this value is meaningless.
inline bool isTrueWhenEqual(CondCode Cond) {
return ((int)Cond & 1) != 0;
}
/// getUnorderedFlavor - This function returns 0 if the condition is always
/// false if an operand is a NaN, 1 if the condition is always true if the
/// operand is a NaN, and 2 if the condition is undefined if the operand is a
/// NaN.
inline unsigned getUnorderedFlavor(CondCode Cond) {
return ((int)Cond >> 3) & 3;
}
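// A quick sanity check of the bit layout (values follow from the enum order
// above): SETOEQ == 1 has only the E bit set, so isTrueWhenEqual(SETOEQ) is
// true and getUnorderedFlavor(SETOEQ) == 0 (false on NaN input). SETUEQ == 9
// adds the U bit, so getUnorderedFlavor(SETUEQ) == 1 (true on NaN input).
// SETEQ == 17 lies in the don't-care block, so getUnorderedFlavor(SETEQ) == 2
// (undefined on NaN input).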
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
CondCode getSetCCInverse(CondCode Operation, bool isInteger);
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
CondCode getSetCCSwappedOperands(CondCode Operation);
/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
/// getSetCCAndOperation - Return the result of a logical AND between
/// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
//===--------------------------------------------------------------------===//
/// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT
/// supports.
enum CvtCode {
CVT_FF, /// Float from Float
CVT_FS, /// Float from Signed
CVT_FU, /// Float from Unsigned
CVT_SF, /// Signed from Float
CVT_UF, /// Unsigned from Float
CVT_SS, /// Signed from Signed
CVT_SU, /// Signed from Unsigned
CVT_US, /// Unsigned from Signed
CVT_UU, /// Unsigned from Unsigned
CVT_INVALID /// Marker - Invalid opcode
};
} // end llvm::ISD namespace
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/FaultMaps.h | //===------------------- FaultMaps.h - The "FaultMaps" section --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_FAULTMAPS_H
#define LLVM_CODEGEN_FAULTMAPS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Format.h"
#include <vector>
#include <map>
namespace llvm {
class AsmPrinter;
class MCExpr;
class MCSymbol;
class MCStreamer;
class FaultMaps {
public:
enum FaultKind { FaultingLoad = 1, FaultKindMax };
static const char *faultTypeToString(FaultKind);
explicit FaultMaps(AsmPrinter &AP);
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *HandlerLabel);
void serializeToFaultMapSection();
private:
static const char *WFMP;
struct FaultInfo {
FaultKind Kind;
const MCExpr *FaultingOffsetExpr;
const MCExpr *HandlerOffsetExpr;
FaultInfo()
: Kind(FaultKindMax), FaultingOffsetExpr(nullptr),
HandlerOffsetExpr(nullptr) {}
explicit FaultInfo(FaultMaps::FaultKind Kind, const MCExpr *FaultingOffset,
const MCExpr *HandlerOffset)
: Kind(Kind), FaultingOffsetExpr(FaultingOffset),
HandlerOffsetExpr(HandlerOffset) {}
};
typedef std::vector<FaultInfo> FunctionFaultInfos;
// We'd like to keep a stable iteration order for FunctionInfos to help
// FileCheck based testing.
struct MCSymbolComparator {
bool operator()(const MCSymbol *LHS, const MCSymbol *RHS) const {
return LHS->getName() < RHS->getName();
}
};
std::map<const MCSymbol *, FunctionFaultInfos, MCSymbolComparator>
FunctionInfos;
AsmPrinter &AP;
void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
};
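// A minimal usage sketch (AP, HandlerLabel and the emission context are
// assumed here, not part of this header's API beyond the calls shown):
//
//   FaultMaps FM(AP);
//   // While emitting an implicit null check, record where control transfers
//   // if the load faults:
//   FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
//   // After the whole module has been emitted:
//   FM.serializeToFaultMapSection();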
/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
/// above. This parser is version locked with the __llvm_faultmaps section
/// generated by the version of LLVM that includes it. No guarantees are made
/// with respect to forward or backward compatibility.
class FaultMapParser {
typedef uint8_t FaultMapVersionType;
static const size_t FaultMapVersionOffset = 0;
typedef uint8_t Reserved0Type;
static const size_t Reserved0Offset =
FaultMapVersionOffset + sizeof(FaultMapVersionType);
typedef uint16_t Reserved1Type;
static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
typedef uint32_t NumFunctionsType;
static const size_t NumFunctionsOffset =
Reserved1Offset + sizeof(Reserved1Type);
static const size_t FunctionInfosOffset =
NumFunctionsOffset + sizeof(NumFunctionsType);
const uint8_t *P;
const uint8_t *E;
template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
assert(P + sizeof(T) <= E && "out of bounds read!");
return support::endian::read<T, support::little, 1>(P);
}
public:
class FunctionFaultInfoAccessor {
typedef uint32_t FaultKindType;
static const size_t FaultKindOffset = 0;
typedef uint32_t FaultingPCOffsetType;
static const size_t FaultingPCOffsetOffset =
FaultKindOffset + sizeof(FaultKindType);
typedef uint32_t HandlerPCOffsetType;
static const size_t HandlerPCOffsetOffset =
FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
const uint8_t *P;
const uint8_t *E;
public:
static const size_t Size =
HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);
explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
: P(P), E(E) {}
FaultKindType getFaultKind() const {
return read<FaultKindType>(P + FaultKindOffset, E);
}
FaultingPCOffsetType getFaultingPCOffset() const {
return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
}
HandlerPCOffsetType getHandlerPCOffset() const {
return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
}
};
class FunctionInfoAccessor {
typedef uint64_t FunctionAddrType;
static const size_t FunctionAddrOffset = 0;
typedef uint32_t NumFaultingPCsType;
static const size_t NumFaultingPCsOffset =
FunctionAddrOffset + sizeof(FunctionAddrType);
typedef uint32_t ReservedType;
static const size_t ReservedOffset =
NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
static const size_t FunctionFaultInfosOffset =
ReservedOffset + sizeof(ReservedType);
static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
const uint8_t *P;
const uint8_t *E;
public:
FunctionInfoAccessor() : P(nullptr), E(nullptr) {}
explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
: P(P), E(E) {}
FunctionAddrType getFunctionAddr() const {
return read<FunctionAddrType>(P + FunctionAddrOffset, E);
}
NumFaultingPCsType getNumFaultingPCs() const {
return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
}
FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
assert(Index < getNumFaultingPCs() && "index out of bounds!");
const uint8_t *Begin = P + FunctionFaultInfosOffset +
FunctionFaultInfoAccessor::Size * Index;
return FunctionFaultInfoAccessor(Begin, E);
}
FunctionInfoAccessor getNextFunctionInfo() const {
size_t MySize = FunctionInfoHeaderSize +
getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;
const uint8_t *Begin = P + MySize;
assert(Begin < E && "out of bounds!");
return FunctionInfoAccessor(Begin, E);
}
};
explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
: P(Begin), E(End) {}
FaultMapVersionType getFaultMapVersion() const {
auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
assert(Version == 1 && "only version 1 supported!");
return Version;
}
NumFunctionsType getNumFunctions() const {
return read<NumFunctionsType>(P + NumFunctionsOffset, E);
}
FunctionInfoAccessor getFirstFunctionInfo() const {
const uint8_t *Begin = P + FunctionInfosOffset;
return FunctionInfoAccessor(Begin, E);
}
};
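// A minimal parsing sketch, assuming Begin/End delimit the raw bytes of a
// version-1 __llvm_faultmaps section:
//
//   FaultMapParser Parser(Begin, End);
//   auto FI = Parser.getFirstFunctionInfo();
//   for (unsigned I = 0, N = Parser.getNumFunctions(); I != N; ++I) {
//     for (uint32_t J = 0; J != FI.getNumFaultingPCs(); ++J) {
//       auto FFI = FI.getFunctionFaultInfoAt(J);
//       // Use FFI.getFaultingPCOffset() / FFI.getHandlerPCOffset() here.
//     }
//     if (I + 1 != N) // getNextFunctionInfo() asserts on running off the end.
//       FI = FI.getNextFunctionInfo();
//   }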
raw_ostream &
operator<<(raw_ostream &OS, const FaultMapParser::FunctionFaultInfoAccessor &);
raw_ostream &operator<<(raw_ostream &OS,
const FaultMapParser::FunctionInfoAccessor &);
raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveIntervalAnalysis.h | //===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass. Given some numbering of
// each of the machine instructions (in this implementation, depth-first order), an
// interval [i, j) is said to be a live interval for register v if there is no
// instruction with number j' > j such that v is live at j' and there is no
// instruction with number i' < i such that v is live at i'. In this
// implementation intervals can have holes, i.e. an interval might look like
// [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cmath>
#include <iterator>
namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs;
class AliasAnalysis;
class BitVector;
class BlockFrequency;
class LiveRangeCalc;
class LiveVariables;
class MachineDominatorTree;
class MachineLoopInfo;
class TargetRegisterInfo;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;
class VirtRegMap;
class MachineBlockFrequencyInfo;
class LiveIntervals : public MachineFunctionPass {
MachineFunction* MF;
MachineRegisterInfo* MRI;
const TargetRegisterInfo* TRI;
const TargetInstrInfo* TII;
AliasAnalysis *AA;
SlotIndexes* Indexes;
MachineDominatorTree *DomTree;
LiveRangeCalc *LRCalc;
/// Special pool allocator for VNInfo's (LiveInterval val#).
///
VNInfo::Allocator VNInfoAllocator;
/// Live interval pointers for all the virtual registers.
IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
/// RegMaskSlots - Sorted list of instructions with register mask operands.
/// Always use the 'r' slot, RegMasks are normal clobbers, not early
/// clobbers.
SmallVector<SlotIndex, 8> RegMaskSlots;
/// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
/// pointer to the corresponding register mask. This pointer can be
/// recomputed as:
///
/// MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
/// unsigned OpNum = findRegMaskOperand(MI);
/// RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
///
/// This is kept in a separate vector partly because some standard
/// libraries don't support lower_bound() with mixed objects, partly to
/// improve locality when searching in RegMaskSlots.
/// Also see the comment in LiveInterval::find().
SmallVector<const uint32_t*, 8> RegMaskBits;
/// For each basic block number, keep (begin, size) pairs indexing into the
/// RegMaskSlots and RegMaskBits arrays.
/// Note that basic block numbers may not be layout contiguous, that's why
/// we can't just keep track of the first register mask in each basic
/// block.
SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
/// Keeps a live range set for each register unit to track fixed physreg
/// interference.
SmallVector<LiveRange*, 0> RegUnitRanges;
public:
static char ID; // Pass identification, replacement for typeid
LiveIntervals();
~LiveIntervals() override;
// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse,
const MachineBlockFrequencyInfo *MBFI,
const MachineInstr *Instr);
LiveInterval &getInterval(unsigned Reg) {
if (hasInterval(Reg))
return *VirtRegIntervals[Reg];
else
return createAndComputeVirtRegInterval(Reg);
}
const LiveInterval &getInterval(unsigned Reg) const {
return const_cast<LiveIntervals*>(this)->getInterval(Reg);
}
bool hasInterval(unsigned Reg) const {
return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
}
// Interval creation.
LiveInterval &createEmptyInterval(unsigned Reg) {
assert(!hasInterval(Reg) && "Interval already exists!");
VirtRegIntervals.grow(Reg);
VirtRegIntervals[Reg] = createInterval(Reg);
return *VirtRegIntervals[Reg];
}
LiveInterval &createAndComputeVirtRegInterval(unsigned Reg) {
LiveInterval &LI = createEmptyInterval(Reg);
computeVirtRegInterval(LI);
return LI;
}
// Interval removal.
void removeInterval(unsigned Reg) {
delete VirtRegIntervals[Reg];
VirtRegIntervals[Reg] = nullptr;
}
/// Given a register and an instruction, adds a live segment from that
/// instruction to the end of its MBB.
LiveInterval::Segment addSegmentToEndOfBlock(unsigned reg,
MachineInstr* startInst);
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
/// Dead PHIDef values are marked as unused.
/// New dead machine instructions are added to the dead vector.
/// Return true if the interval may have been separated into multiple
/// connected components.
bool shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead = nullptr);
/// Specialized version of
/// shrinkToUses(LiveInterval *li, SmallVectorImpl<MachineInstr*> *dead)
/// that works on a subregister live range and only looks at uses matching
/// the lane mask of the subregister range.
void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
/// extendToIndices - Extend the live range of LR to reach all points in
/// Indices. The points in the Indices array must be jointly dominated by
/// existing defs in LR. PHI-defs are added as needed to maintain SSA form.
///
/// If a SlotIndex in Indices is the end index of a basic block, LR will be
/// extended to be live out of the basic block.
///
/// See also LiveRangeCalc::extend().
void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices);
/// If @p LR has a live value at @p Kill, prune its live range by removing
/// any liveness reachable from Kill. Add live range end points to
/// EndPoints such that extendToIndices(LR, EndPoints) will reconstruct the
/// value's live range.
///
/// Calling pruneValue() and extendToIndices() can be used to reconstruct
/// SSA form after adding defs to a virtual register.
void pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints);
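// A sketch of the SSA-repair recipe described above, assuming a new def of
// the register has just been inserted at NewDefIdx into live range LR:
//
//   SmallVector<SlotIndex, 8> EndPoints;
//   LIS.pruneValue(LR, NewDefIdx, &EndPoints); // cut liveness past the def
//   // ... create the value number for the new def in LR ...
//   LIS.extendToIndices(LR, EndPoints);        // regrow the pruned liveness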
SlotIndexes *getSlotIndexes() const {
return Indexes;
}
AliasAnalysis *getAliasAnalysis() const {
return AA;
}
/// isNotInMIMap - returns true if the specified machine instr has been
/// removed or was never entered in the map.
bool isNotInMIMap(const MachineInstr* Instr) const {
return !Indexes->hasIndex(Instr);
}
/// Returns the base index of the given instruction.
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
return Indexes->getInstructionIndex(instr);
}
/// Returns the instruction associated with the given index.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return Indexes->getInstructionFromIndex(index);
}
/// Return the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
return Indexes->getMBBStartIdx(mbb);
}
/// Return the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
return Indexes->getMBBEndIdx(mbb);
}
bool isLiveInToMBB(const LiveRange &LR,
const MachineBasicBlock *mbb) const {
return LR.liveAt(getMBBStartIdx(mbb));
}
bool isLiveOutOfMBB(const LiveRange &LR,
const MachineBasicBlock *mbb) const {
return LR.liveAt(getMBBEndIdx(mbb).getPrevSlot());
}
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
return Indexes->getMBBFromIndex(index);
}
void insertMBBInMaps(MachineBasicBlock *MBB) {
Indexes->insertMBBInMaps(MBB);
assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
"Blocks must be added in order.");
RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
}
SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
return Indexes->insertMachineInstrInMaps(MI);
}
void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E) {
for (MachineBasicBlock::iterator I = B; I != E; ++I)
Indexes->insertMachineInstrInMaps(I);
}
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
Indexes->removeMachineInstrFromMaps(MI);
}
void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
Indexes->replaceMachineInstrInMaps(MI, NewMI);
}
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
return Indexes->findLiveInMBBs(Start, End, MBBs);
}
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
/// runOnMachineFunction - pass entry point
bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
void print(raw_ostream &O, const Module* = nullptr) const override;
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
/// a pointer to that block. If LI is live in to or out of any block,
/// return NULL.
MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
/// Returns true if VNI is killed by any PHI-def values in LI.
/// This may conservatively return true to avoid expensive computations.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
/// addKillFlags - Add kill flags to any instruction that kills a virtual
/// register.
void addKillFlags(const VirtRegMap*);
/// handleMove - call this method to notify LiveIntervals that
/// instruction 'mi' has been moved within a basic block. This will update
/// the live intervals for all operands of mi. Moves between basic blocks
/// are not supported.
///
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
void handleMove(MachineInstr* MI, bool UpdateFlags = false);
/// moveIntoBundle - Update intervals for operands of MI so that they
/// begin/end on the SlotIndex for BundleStart.
///
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
///
/// Requires MI and BundleStart to have SlotIndexes, and assumes
/// existing liveness is accurate. BundleStart should be the first
/// instruction in the Bundle.
void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart,
bool UpdateFlags = false);
/// repairIntervalsInRange - Update live intervals for instructions in a
/// range of iterators. It is intended for use after target hooks that may
/// insert or remove instructions, and is only efficient for a small number
/// of instructions.
///
/// OrigRegs is a vector of registers that were originally used by the
/// instructions in the range between the two iterators.
///
/// Currently, the only changes that are supported are simple removal
/// and addition of uses.
void repairIntervalsInRange(MachineBasicBlock *MBB,
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
ArrayRef<unsigned> OrigRegs);
// Register mask functions.
//
// Machine instructions may use a register mask operand to indicate that a
// large number of registers are clobbered by the instruction. This is
// typically used for calls.
//
// For compile time performance reasons, these clobbers are not recorded in
// the live intervals for individual physical registers. Instead,
// LiveIntervalAnalysis maintains a sorted list of instructions with
// register mask operands.
/// getRegMaskSlots - Returns a sorted array of slot indices of all
/// instructions with register mask operands.
ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
/// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
/// instructions with register mask operands in the basic block numbered
/// MBBNum.
ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
return getRegMaskSlots().slice(P.first, P.second);
}
/// getRegMaskBits() - Returns an array of register mask pointers
/// corresponding to getRegMaskSlots().
ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
/// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
/// to getRegMaskSlotsInBlock(MBBNum).
ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
return getRegMaskBits().slice(P.first, P.second);
}
/// checkRegMaskInterference - Test if LI is live across any register mask
/// instructions, and compute a bit mask of physical registers that are not
/// clobbered by any of them.
///
/// Returns false if LI doesn't cross any register mask instructions. In
/// that case, the bit vector is not filled in.
bool checkRegMaskInterference(LiveInterval &LI,
BitVector &UsableRegs);
// Register unit functions.
//
// Fixed interference occurs when MachineInstrs use physregs directly
// instead of virtual registers. This typically happens when passing
// arguments to a function call, or when instructions require operands in
// fixed registers.
//
// Each physreg has one or more register units, see MCRegisterInfo. We
// track liveness per register unit to handle aliasing registers more
// efficiently.
/// getRegUnit - Return the live range for Unit.
/// It will be computed if it doesn't exist.
LiveRange &getRegUnit(unsigned Unit) {
LiveRange *LR = RegUnitRanges[Unit];
if (!LR) {
// Compute missing ranges on demand.
// Use segment set to speed-up initial computation of the live range.
RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs);
computeRegUnitRange(*LR, Unit);
}
return *LR;
}
/// getCachedRegUnit - Return the live range for Unit if it has already
/// been computed, or NULL if it hasn't been computed yet.
LiveRange *getCachedRegUnit(unsigned Unit) {
return RegUnitRanges[Unit];
}
const LiveRange *getCachedRegUnit(unsigned Unit) const {
return RegUnitRanges[Unit];
}
/// Remove value numbers and related live segments starting at position
/// @p Pos that are part of any liverange of physical register @p Reg or one
/// of its subregisters.
void removePhysRegDefAt(unsigned Reg, SlotIndex Pos);
/// Remove value number and related live segments of @p LI and its subranges
/// that start at position @p Pos.
void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);
private:
/// Compute live intervals for all virtual registers.
void computeVirtRegs();
/// Compute RegMaskSlots and RegMaskBits.
void computeRegMasks();
/// Walk the values in @p LI and check for dead values:
/// - Dead PHIDef values are marked as unused.
/// - Dead operands are marked as such.
/// - Completely dead machine instructions are added to the @p dead vector
/// if it is not nullptr.
/// Returns true if any PHI value numbers have been removed which may
/// have separated the interval into multiple connected components.
bool computeDeadValues(LiveInterval &LI,
SmallVectorImpl<MachineInstr*> *dead);
static LiveInterval* createInterval(unsigned Reg);
void printInstrs(raw_ostream &O) const;
void dumpInstrs() const;
void computeLiveInRegUnits();
void computeRegUnitRange(LiveRange&, unsigned Unit);
void computeVirtRegInterval(LiveInterval&);
/// Helper function for repairIntervalsInRange(), walks backwards and
/// creates/modifies live segments in @p LR to match the operands found.
/// Only full operands or operands with subregisters matching @p LaneMask
/// are considered.
void repairOldRegInRange(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
const SlotIndex endIdx, LiveRange &LR,
unsigned Reg, unsigned LaneMask = ~0u);
class HMEditor;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/LiveRangeEdit.h | //===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeEdit class represents changes done to a virtual register when it
// is spilled or split.
//
// The parent register is never changed. Instead, a number of new virtual
// registers are created and added to the newRegs vector.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
#define LLVM_CODEGEN_LIVERANGEEDIT_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
class AliasAnalysis;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineLoopInfo;
class VirtRegMap;
class LiveRangeEdit : private MachineRegisterInfo::Delegate {
public:
/// Callback methods for LiveRangeEdit owners.
class Delegate {
virtual void anchor();
public:
/// Called immediately before erasing a dead machine instruction.
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
/// Called when a virtual register is no longer used. Return false to defer
/// its deletion from LiveIntervals.
virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
/// Called before shrinking the live range of a virtual register.
virtual void LRE_WillShrinkVirtReg(unsigned) {}
/// Called after cloning a virtual register.
/// This is used for new registers representing connected components of Old.
virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
virtual ~Delegate() {}
};
private:
LiveInterval *Parent;
SmallVectorImpl<unsigned> &NewRegs;
MachineRegisterInfo &MRI;
LiveIntervals &LIS;
VirtRegMap *VRM;
const TargetInstrInfo &TII;
Delegate *const TheDelegate;
/// FirstNew - Index of the first register added to NewRegs.
const unsigned FirstNew;
/// ScannedRemattable - true when remattable values have been identified.
bool ScannedRemattable;
/// Remattable - Values defined by remattable instructions as identified by
/// tii.isTriviallyReMaterializable().
SmallPtrSet<const VNInfo*,4> Remattable;
/// Rematted - Values that were actually rematted, and so need to have their
/// live range trimmed or entirely removed.
SmallPtrSet<const VNInfo*,4> Rematted;
/// scanRemattable - Identify the Parent values that may rematerialize.
void scanRemattable(AliasAnalysis *aa);
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
SlotIndex UseIdx) const;
/// foldAsLoad - If LI has a single use and a single def that can be folded as
/// a load, eliminate the register by folding the def into the use.
bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
typedef SetVector<LiveInterval*,
SmallVector<LiveInterval*, 8>,
SmallPtrSet<LiveInterval*, 8> > ToShrinkSet;
/// Helper for eliminateDeadDefs.
void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink);
/// MachineRegisterInfo callback to notify when new virtual
/// registers are created.
void MRI_NoteNewVirtualRegister(unsigned VReg) override;
/// \brief Check if MachineOperand \p MO is a last use/kill either in the
/// main live range of \p LI or in one of the matching subregister ranges.
bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
public:
/// Create a LiveRangeEdit for breaking down parent into smaller pieces.
/// @param parent The register being spilled or split.
/// @param newRegs List to receive any new registers created. This needn't be
/// empty initially; any existing registers are ignored.
/// @param MF The MachineFunction the live range edit is taking place in.
/// @param lis The collection of all live intervals in this function.
/// @param vrm Map of virtual registers to physical registers for this
/// function. If NULL, no virtual register map updates will
/// be done. This could be the case if called before Regalloc.
LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
Delegate *delegate = nullptr)
: Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()),
TheDelegate(delegate), FirstNew(newRegs.size()),
ScannedRemattable(false) {
MRI.setDelegate(this);
}
~LiveRangeEdit() override { MRI.resetDelegate(this); }
LiveInterval &getParent() const {
assert(Parent && "No parent LiveInterval");
return *Parent;
}
unsigned getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit.
typedef SmallVectorImpl<unsigned>::const_iterator iterator;
iterator begin() const { return NewRegs.begin()+FirstNew; }
iterator end() const { return NewRegs.end(); }
unsigned size() const { return NewRegs.size()-FirstNew; }
bool empty() const { return size() == 0; }
unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
ArrayRef<unsigned> regs() const {
return makeArrayRef(NewRegs).slice(FirstNew);
}
/// createEmptyIntervalFrom - Create a new empty interval based on OldReg.
LiveInterval &createEmptyIntervalFrom(unsigned OldReg);
/// createFrom - Create a new virtual register based on OldReg.
unsigned createFrom(unsigned OldReg);
/// create - Create a new register with the same class and original slot as
/// parent.
LiveInterval &createEmptyInterval() {
return createEmptyIntervalFrom(getReg());
}
unsigned create() {
return createFrom(getReg());
}
/// anyRematerializable - Return true if any parent values may be
/// rematerializable.
/// This function must be called before any rematerialization is attempted.
bool anyRematerializable(AliasAnalysis*);
/// checkRematerializable - Manually add VNI to the list of rematerializable
/// values if DefMI may be rematerializable.
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
AliasAnalysis*);
/// Remat - Information needed to rematerialize at a specific location.
struct Remat {
VNInfo *ParentVNI; // Parent's value at the remat location.
MachineInstr *OrigMI; // Instruction defining ParentVNI.
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
};
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
/// UseIdx. It is assumed that Parent->getVNInfoAt(UseIdx) == ParentVNI.
/// When cheapAsAMove is set, only cheap remats are allowed.
bool canRematerializeAt(Remat &RM,
SlotIndex UseIdx,
bool cheapAsAMove);
/// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
/// instruction into MBB before MI. The new instruction is mapped, but
/// liveness is not updated.
/// Return the SlotIndex of the new instruction.
SlotIndex rematerializeAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg,
const Remat &RM,
const TargetRegisterInfo&,
bool Late = false);
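// A sketch of the intended workflow (MBB, MI, DestReg, ParentVNI, UseIdx and
// TRI are assumed from the caller's context):
//
//   if (Edit.anyRematerializable(AA)) {
//     LiveRangeEdit::Remat RM(ParentVNI);
//     if (Edit.canRematerializeAt(RM, UseIdx, /*cheapAsAMove=*/true)) {
//       SlotIndex DefIdx = Edit.rematerializeAt(MBB, MI, DestReg, RM, *TRI);
//       // The new instruction is mapped, but the caller must still update
//       // liveness for DestReg (see the comment on rematerializeAt above).
//     }
//   }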
/// markRematerialized - explicitly mark a value as rematerialized after doing
/// it manually.
void markRematerialized(const VNInfo *ParentVNI) {
Rematted.insert(ParentVNI);
}
/// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
bool didRematerialize(const VNInfo *ParentVNI) const {
return Rematted.count(ParentVNI);
}
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
/// to erase it from LIS.
void eraseVirtReg(unsigned Reg);
/// eliminateDeadDefs - Try to delete machine instructions that are now dead
/// (allDefsAreDead returns true). This may cause live intervals to be trimmed
/// and further dead defs to be eliminated.
/// RegsBeingSpilled lists registers currently being spilled by the register
/// allocator. These registers should not be split into new intervals
/// as currently those new intervals are not guaranteed to spill.
void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
ArrayRef<unsigned> RegsBeingSpilled = None);
/// calculateRegClassAndHint - Recompute register class and hint for each new
/// register.
void calculateRegClassAndHint(MachineFunction&,
const MachineLoopInfo&,
const MachineBlockFrequencyInfo&);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RuntimeLibcalls.h | //===-- CodeGen/RuntimeLibcalls.h - Runtime Library Calls -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the enum representing the list of runtime library calls
// the backend may emit during code generation, and also some helper functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
#include "llvm/CodeGen/ValueTypes.h"
namespace llvm {
namespace RTLIB {
/// RTLIB::Libcall enum - This enum defines all of the runtime library calls
/// the backend can emit. The various long double types cannot be merged,
/// because 80-bit library functions use "xf" and 128-bit use "tf".
///
/// When adding PPCF128 functions here, note that their names generally need
/// to be overridden for Darwin with the xxx$LDBL128 form. See
/// PPCISelLowering.cpp.
///
enum Libcall {
// Integer
SHL_I16,
SHL_I32,
SHL_I64,
SHL_I128,
SRL_I16,
SRL_I32,
SRL_I64,
SRL_I128,
SRA_I16,
SRA_I32,
SRA_I64,
SRA_I128,
MUL_I8,
MUL_I16,
MUL_I32,
MUL_I64,
MUL_I128,
MULO_I32,
MULO_I64,
MULO_I128,
SDIV_I8,
SDIV_I16,
SDIV_I32,
SDIV_I64,
SDIV_I128,
UDIV_I8,
UDIV_I16,
UDIV_I32,
UDIV_I64,
UDIV_I128,
SREM_I8,
SREM_I16,
SREM_I32,
SREM_I64,
SREM_I128,
UREM_I8,
UREM_I16,
UREM_I32,
UREM_I64,
UREM_I128,
SDIVREM_I8,
SDIVREM_I16,
SDIVREM_I32,
SDIVREM_I64,
SDIVREM_I128,
UDIVREM_I8,
UDIVREM_I16,
UDIVREM_I32,
UDIVREM_I64,
UDIVREM_I128,
NEG_I32,
NEG_I64,
// FLOATING POINT
ADD_F32,
ADD_F64,
ADD_F80,
ADD_F128,
ADD_PPCF128,
SUB_F32,
SUB_F64,
SUB_F80,
SUB_F128,
SUB_PPCF128,
MUL_F32,
MUL_F64,
MUL_F80,
MUL_F128,
MUL_PPCF128,
DIV_F32,
DIV_F64,
DIV_F80,
DIV_F128,
DIV_PPCF128,
REM_F32,
REM_F64,
REM_F80,
REM_F128,
REM_PPCF128,
FMA_F32,
FMA_F64,
FMA_F80,
FMA_F128,
FMA_PPCF128,
POWI_F32,
POWI_F64,
POWI_F80,
POWI_F128,
POWI_PPCF128,
SQRT_F32,
SQRT_F64,
SQRT_F80,
SQRT_F128,
SQRT_PPCF128,
LOG_F32,
LOG_F64,
LOG_F80,
LOG_F128,
LOG_PPCF128,
LOG2_F32,
LOG2_F64,
LOG2_F80,
LOG2_F128,
LOG2_PPCF128,
LOG10_F32,
LOG10_F64,
LOG10_F80,
LOG10_F128,
LOG10_PPCF128,
EXP_F32,
EXP_F64,
EXP_F80,
EXP_F128,
EXP_PPCF128,
EXP2_F32,
EXP2_F64,
EXP2_F80,
EXP2_F128,
EXP2_PPCF128,
SIN_F32,
SIN_F64,
SIN_F80,
SIN_F128,
SIN_PPCF128,
COS_F32,
COS_F64,
COS_F80,
COS_F128,
COS_PPCF128,
SINCOS_F32,
SINCOS_F64,
SINCOS_F80,
SINCOS_F128,
SINCOS_PPCF128,
POW_F32,
POW_F64,
POW_F80,
POW_F128,
POW_PPCF128,
CEIL_F32,
CEIL_F64,
CEIL_F80,
CEIL_F128,
CEIL_PPCF128,
TRUNC_F32,
TRUNC_F64,
TRUNC_F80,
TRUNC_F128,
TRUNC_PPCF128,
RINT_F32,
RINT_F64,
RINT_F80,
RINT_F128,
RINT_PPCF128,
NEARBYINT_F32,
NEARBYINT_F64,
NEARBYINT_F80,
NEARBYINT_F128,
NEARBYINT_PPCF128,
ROUND_F32,
ROUND_F64,
ROUND_F80,
ROUND_F128,
ROUND_PPCF128,
FLOOR_F32,
FLOOR_F64,
FLOOR_F80,
FLOOR_F128,
FLOOR_PPCF128,
COPYSIGN_F32,
COPYSIGN_F64,
COPYSIGN_F80,
COPYSIGN_F128,
COPYSIGN_PPCF128,
FMIN_F32,
FMIN_F64,
FMIN_F80,
FMIN_F128,
FMIN_PPCF128,
FMAX_F32,
FMAX_F64,
FMAX_F80,
FMAX_F128,
FMAX_PPCF128,
// CONVERSION
FPEXT_F64_F128,
FPEXT_F32_F128,
FPEXT_F32_F64,
FPEXT_F16_F32,
FPROUND_F32_F16,
FPROUND_F64_F16,
FPROUND_F80_F16,
FPROUND_F128_F16,
FPROUND_PPCF128_F16,
FPROUND_F64_F32,
FPROUND_F80_F32,
FPROUND_F128_F32,
FPROUND_PPCF128_F32,
FPROUND_F80_F64,
FPROUND_F128_F64,
FPROUND_PPCF128_F64,
FPTOSINT_F32_I8,
FPTOSINT_F32_I16,
FPTOSINT_F32_I32,
FPTOSINT_F32_I64,
FPTOSINT_F32_I128,
FPTOSINT_F64_I8,
FPTOSINT_F64_I16,
FPTOSINT_F64_I32,
FPTOSINT_F64_I64,
FPTOSINT_F64_I128,
FPTOSINT_F80_I32,
FPTOSINT_F80_I64,
FPTOSINT_F80_I128,
FPTOSINT_F128_I32,
FPTOSINT_F128_I64,
FPTOSINT_F128_I128,
FPTOSINT_PPCF128_I32,
FPTOSINT_PPCF128_I64,
FPTOSINT_PPCF128_I128,
FPTOUINT_F32_I8,
FPTOUINT_F32_I16,
FPTOUINT_F32_I32,
FPTOUINT_F32_I64,
FPTOUINT_F32_I128,
FPTOUINT_F64_I8,
FPTOUINT_F64_I16,
FPTOUINT_F64_I32,
FPTOUINT_F64_I64,
FPTOUINT_F64_I128,
FPTOUINT_F80_I32,
FPTOUINT_F80_I64,
FPTOUINT_F80_I128,
FPTOUINT_F128_I32,
FPTOUINT_F128_I64,
FPTOUINT_F128_I128,
FPTOUINT_PPCF128_I32,
FPTOUINT_PPCF128_I64,
FPTOUINT_PPCF128_I128,
SINTTOFP_I32_F32,
SINTTOFP_I32_F64,
SINTTOFP_I32_F80,
SINTTOFP_I32_F128,
SINTTOFP_I32_PPCF128,
SINTTOFP_I64_F32,
SINTTOFP_I64_F64,
SINTTOFP_I64_F80,
SINTTOFP_I64_F128,
SINTTOFP_I64_PPCF128,
SINTTOFP_I128_F32,
SINTTOFP_I128_F64,
SINTTOFP_I128_F80,
SINTTOFP_I128_F128,
SINTTOFP_I128_PPCF128,
UINTTOFP_I32_F32,
UINTTOFP_I32_F64,
UINTTOFP_I32_F80,
UINTTOFP_I32_F128,
UINTTOFP_I32_PPCF128,
UINTTOFP_I64_F32,
UINTTOFP_I64_F64,
UINTTOFP_I64_F80,
UINTTOFP_I64_F128,
UINTTOFP_I64_PPCF128,
UINTTOFP_I128_F32,
UINTTOFP_I128_F64,
UINTTOFP_I128_F80,
UINTTOFP_I128_F128,
UINTTOFP_I128_PPCF128,
// COMPARISON
OEQ_F32,
OEQ_F64,
OEQ_F128,
UNE_F32,
UNE_F64,
UNE_F128,
OGE_F32,
OGE_F64,
OGE_F128,
OLT_F32,
OLT_F64,
OLT_F128,
OLE_F32,
OLE_F64,
OLE_F128,
OGT_F32,
OGT_F64,
OGT_F128,
UO_F32,
UO_F64,
UO_F128,
O_F32,
O_F64,
O_F128,
// MEMORY
MEMCPY,
MEMSET,
MEMMOVE,
// EXCEPTION HANDLING
UNWIND_RESUME,
// Family ATOMICs
SYNC_VAL_COMPARE_AND_SWAP_1,
SYNC_VAL_COMPARE_AND_SWAP_2,
SYNC_VAL_COMPARE_AND_SWAP_4,
SYNC_VAL_COMPARE_AND_SWAP_8,
SYNC_VAL_COMPARE_AND_SWAP_16,
SYNC_LOCK_TEST_AND_SET_1,
SYNC_LOCK_TEST_AND_SET_2,
SYNC_LOCK_TEST_AND_SET_4,
SYNC_LOCK_TEST_AND_SET_8,
SYNC_LOCK_TEST_AND_SET_16,
SYNC_FETCH_AND_ADD_1,
SYNC_FETCH_AND_ADD_2,
SYNC_FETCH_AND_ADD_4,
SYNC_FETCH_AND_ADD_8,
SYNC_FETCH_AND_ADD_16,
SYNC_FETCH_AND_SUB_1,
SYNC_FETCH_AND_SUB_2,
SYNC_FETCH_AND_SUB_4,
SYNC_FETCH_AND_SUB_8,
SYNC_FETCH_AND_SUB_16,
SYNC_FETCH_AND_AND_1,
SYNC_FETCH_AND_AND_2,
SYNC_FETCH_AND_AND_4,
SYNC_FETCH_AND_AND_8,
SYNC_FETCH_AND_AND_16,
SYNC_FETCH_AND_OR_1,
SYNC_FETCH_AND_OR_2,
SYNC_FETCH_AND_OR_4,
SYNC_FETCH_AND_OR_8,
SYNC_FETCH_AND_OR_16,
SYNC_FETCH_AND_XOR_1,
SYNC_FETCH_AND_XOR_2,
SYNC_FETCH_AND_XOR_4,
SYNC_FETCH_AND_XOR_8,
SYNC_FETCH_AND_XOR_16,
SYNC_FETCH_AND_NAND_1,
SYNC_FETCH_AND_NAND_2,
SYNC_FETCH_AND_NAND_4,
SYNC_FETCH_AND_NAND_8,
SYNC_FETCH_AND_NAND_16,
SYNC_FETCH_AND_MAX_1,
SYNC_FETCH_AND_MAX_2,
SYNC_FETCH_AND_MAX_4,
SYNC_FETCH_AND_MAX_8,
SYNC_FETCH_AND_MAX_16,
SYNC_FETCH_AND_UMAX_1,
SYNC_FETCH_AND_UMAX_2,
SYNC_FETCH_AND_UMAX_4,
SYNC_FETCH_AND_UMAX_8,
SYNC_FETCH_AND_UMAX_16,
SYNC_FETCH_AND_MIN_1,
SYNC_FETCH_AND_MIN_2,
SYNC_FETCH_AND_MIN_4,
SYNC_FETCH_AND_MIN_8,
SYNC_FETCH_AND_MIN_16,
SYNC_FETCH_AND_UMIN_1,
SYNC_FETCH_AND_UMIN_2,
SYNC_FETCH_AND_UMIN_4,
SYNC_FETCH_AND_UMIN_8,
SYNC_FETCH_AND_UMIN_16,
// Stack Protector Fail.
STACKPROTECTOR_CHECK_FAIL,
UNKNOWN_LIBCALL
};
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getFPEXT(EVT OpVT, EVT RetVT);
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT);
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT);
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT);
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT);
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT);
/// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getATOMIC(unsigned Opc, MVT VT);
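// For example (a sketch; both helpers are declared above):
// getFPEXT(MVT::f32, MVT::f64) yields FPEXT_F32_F64, while
// getFPTOSINT(MVT::f80, MVT::i16) yields UNKNOWN_LIBCALL, since no
// FPTOSINT_F80_I16 entry exists in the enum above.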
}
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/RegisterClassInfo.h | //===-- RegisterClassInfo.h - Dynamic Register Class Info -*- C++ -*-------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterClassInfo class which provides dynamic
// information about target register classes. Callee saved and reserved
// registers depends on calling conventions and other dynamic information, so
// some things cannot be determined statically.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
#define LLVM_CODEGEN_REGISTERCLASSINFO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class RegisterClassInfo {
struct RCInfo {
unsigned Tag;
unsigned NumRegs;
bool ProperSubClass;
uint8_t MinCost;
uint16_t LastCostChange;
std::unique_ptr<MCPhysReg[]> Order;
RCInfo()
: Tag(0), NumRegs(0), ProperSubClass(false), MinCost(0),
LastCostChange(0) {}
operator ArrayRef<MCPhysReg>() const {
return makeArrayRef(Order.get(), NumRegs);
}
};
// Brief cached information for each register class.
std::unique_ptr<RCInfo[]> RegClass;
// Tag changes whenever cached information needs to be recomputed. An RCInfo
// entry is valid when its tag matches.
unsigned Tag;
const MachineFunction *MF;
const TargetRegisterInfo *TRI;
// Callee saved registers of last MF. Assumed to be valid until the next
// runOnMachineFunction() call.
const MCPhysReg *CalleeSaved;
// Map register number to CalleeSaved index + 1;
SmallVector<uint8_t, 4> CSRNum;
// Reserved registers in the current MF.
BitVector Reserved;
std::unique_ptr<unsigned[]> PSetLimits;
// Compute all information about RC.
void compute(const TargetRegisterClass *RC) const;
// Return an up-to-date RCInfo for RC.
const RCInfo &get(const TargetRegisterClass *RC) const {
const RCInfo &RCI = RegClass[RC->getID()];
if (Tag != RCI.Tag)
compute(RC);
return RCI;
}
public:
RegisterClassInfo();
/// runOnMachineFunction - Prepare to answer questions about MF. This must be
/// called before any other methods are used.
void runOnMachineFunction(const MachineFunction &MF);
/// getNumAllocatableRegs - Returns the number of actually allocatable
/// registers in RC in the current function.
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
return get(RC).NumRegs;
}
/// getOrder - Returns the preferred allocation order for RC. The order
/// contains no reserved registers, and registers that alias callee saved
/// registers come last.
ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
return get(RC);
}
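// A minimal query sketch (assumes runOnMachineFunction() has been called and
// RC is some target register class):
//
//   for (MCPhysReg PhysReg : RCI.getOrder(RC)) {
//     // PhysReg is allocatable in the current function; registers aliasing
//     // callee-saved registers appear last in this order.
//   }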
/// isProperSubClass - Returns true if RC has a legal super-class with more
/// allocatable registers.
///
/// Register classes like GR32_NOSP are not proper sub-classes because %esp
/// is not allocatable. Similarly, tGPR is not a proper sub-class in Thumb
/// mode because the GPR super-class is not legal.
bool isProperSubClass(const TargetRegisterClass *RC) const {
return get(RC).ProperSubClass;
}
/// getLastCalleeSavedAlias - Returns the last callee saved register that
/// overlaps PhysReg, or 0 if PhysReg doesn't overlap a CSR.
unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
if (unsigned N = CSRNum[PhysReg])
return CalleeSaved[N-1];
return 0;
}
/// Get the minimum register cost in RC's allocation order.
/// This is the smallest value returned by TRI->getCostPerUse(Reg) for all
/// the registers in getOrder(RC).
unsigned getMinCost(const TargetRegisterClass *RC) {
return get(RC).MinCost;
}
/// Get the position of the last cost change in getOrder(RC).
///
/// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
/// same cost according to TRI->getCostPerUse().
unsigned getLastCostChange(const TargetRegisterClass *RC) {
return get(RC).LastCostChange;
}
/// Get the register unit limit for the given pressure set index.
///
/// RegisterClassInfo adjusts this limit for reserved registers.
unsigned getRegPressureSetLimit(unsigned Idx) const {
if (!PSetLimits[Idx])
PSetLimits[Idx] = computePSetLimit(Idx);
return PSetLimits[Idx];
}
protected:
unsigned computePSetLimit(unsigned Idx) const;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/Passes.h | //===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces to access the target independent code generation
// passes provided by the LLVM backend.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PASSES_H
#define LLVM_CODEGEN_PASSES_H
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
#include <functional>
#include <string>
namespace llvm {
class MachineFunctionPass;
class PassConfigImpl;
class PassInfo;
class ScheduleDAGInstrs;
class TargetLowering;
class TargetLoweringBase;
class TargetRegisterClass;
class raw_ostream;
struct MachineSchedContext;
// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
class PassManagerBase;
}
using legacy::PassManagerBase;
/// Discriminated union of Pass ID types.
///
/// The PassConfig API prefers dealing with IDs because they are safer and more
/// efficient. IDs decouple configuration from instantiation. This way, when a
/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe to
/// refer to a Pass pointer after adding it to a pass manager, which deletes
/// redundant pass instances.
///
/// However, it is convenient to directly instantiate target passes with
/// non-default ctors. These often don't have a registered PassInfo. Rather than
/// force all target passes to implement the pass registry boilerplate, allow
/// the PassConfig API to handle either type.
///
/// AnalysisID is sadly char*, so PointerIntPair won't work.
class IdentifyingPassPtr {
union {
AnalysisID ID;
Pass *P;
};
bool IsInstance;
public:
IdentifyingPassPtr() : P(nullptr), IsInstance(false) {}
IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
bool isValid() const { return P; }
bool isInstance() const { return IsInstance; }
AnalysisID getID() const {
assert(!IsInstance && "Not a Pass ID");
return ID;
}
Pass *getInstance() const {
assert(IsInstance && "Not a Pass Instance");
return P;
}
};
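// For illustration (pass names hypothetical): both kinds of references
// collapse into the same discriminated type.
//
//   IdentifyingPassPtr ByID(&MyPassClass::ID);         // registered pass ID
//   IdentifyingPassPtr ByInstance(new MyTargetPass()); // concrete instance
//   if (ByInstance.isInstance())
//     PM.add(ByInstance.getInstance());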
template <> struct isPodLike<IdentifyingPassPtr> {
static const bool value = true;
};
/// Target-Independent Code Generator Pass Configuration Options.
///
/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
/// to the internals of other CodeGen passes.
class TargetPassConfig : public ImmutablePass {
public:
/// Pseudo Pass IDs. These are defined within TargetPassConfig because they
  /// are unregistered pass IDs. They are only useful with TargetPassConfig
  /// APIs to identify multiple occurrences of the same pass.
///
/// EarlyTailDuplicate - A clone of the TailDuplicate pass that runs early
/// during codegen, on SSA form.
static char EarlyTailDuplicateID;
/// PostRAMachineLICM - A clone of the LICM pass that runs during late machine
/// optimization after regalloc.
static char PostRAMachineLICMID;
private:
PassManagerBase *PM;
AnalysisID StartBefore, StartAfter;
AnalysisID StopAfter;
bool Started;
bool Stopped;
bool AddingMachinePasses;
protected:
TargetMachine *TM;
PassConfigImpl *Impl; // Internal data structures
bool Initialized; // Flagged after all passes are configured.
// Target Pass Options
// Targets provide a default setting, user flags override.
//
bool DisableVerify;
/// Default setting for -enable-tail-merge on this target.
bool EnableTailMerge;
/// Default setting for -enable-shrink-wrap on this target.
bool EnableShrinkWrap;
public:
TargetPassConfig(TargetMachine *tm, PassManagerBase &pm);
// Dummy constructor.
TargetPassConfig();
~TargetPassConfig() override;
static char ID;
/// Get the right type of TargetMachine for this target.
template<typename TMC> TMC &getTM() const {
return *static_cast<TMC*>(TM);
}
//
void setInitialized() { Initialized = true; }
CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
/// Set the StartAfter, StartBefore and StopAfter passes to allow running only
/// a portion of the normal code-gen pass sequence.
///
  /// If the StartAfter and StartBefore pass IDs are zero, then compilation will
/// begin at the normal point; otherwise, clear the Started flag to indicate
/// that passes should not be added until the starting pass is seen. If the
/// Stop pass ID is zero, then compilation will continue to the end.
///
/// This function expects that at least one of the StartAfter or the
/// StartBefore pass IDs is null.
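  ///
  /// Example (illustrative sketch; PC is a hypothetical TargetPassConfig):
  ///   PC.setStartStopPasses(/*StartBefore=*/&MachineSchedulerID,
  ///                         /*StartAfter=*/nullptr,
  ///                         /*StopAfter=*/&VirtRegRewriterID);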
void setStartStopPasses(AnalysisID StartBefore, AnalysisID StartAfter,
AnalysisID StopAfter) {
if (StartAfter)
assert(!StartBefore && "Start after and start before passes are given");
this->StartBefore = StartBefore;
this->StartAfter = StartAfter;
this->StopAfter = StopAfter;
Started = (StartAfter == nullptr) && (StartBefore == nullptr);
}
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
bool getEnableTailMerge() const { return EnableTailMerge; }
void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
/// Allow the target to override a specific pass without overriding the pass
/// pipeline. When passes are added to the standard pipeline at the
/// point where StandardID is expected, add TargetID in its place.
void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
/// Insert InsertedPassID pass after TargetPassID pass.
void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID);
/// Allow the target to enable a specific standard pass by default.
void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
/// Allow the target to disable a specific standard pass by default.
void disablePass(AnalysisID PassID) {
substitutePass(PassID, IdentifyingPassPtr());
}
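  // Example (illustrative sketch; MyIfConvID is a hypothetical pass ID) of
  // tailoring the standard pipeline from a target:
  //   disablePass(&EarlyIfConverterID);             // drop a standard pass
  //   substitutePass(&IfConverterID, &MyIfConvID);  // swap in a replacement
  //   insertPass(&MachineSchedulerID, &MyIfConvID); // run it after sched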
/// Return the pass substituted for StandardID by the target.
/// If no substitution exists, return StandardID.
IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
/// Return true if the optimized regalloc pipeline is enabled.
bool getOptimizeRegAlloc() const;
/// Return true if shrink wrapping is enabled.
bool getEnableShrinkWrap() const;
/// Return true if the default global register allocator is in use and
  /// has not been overridden on the command line with '-regalloc=...'
bool usingDefaultRegAlloc() const;
/// Add common target configurable passes that perform LLVM IR to IR
/// transforms following machine independent optimization.
virtual void addIRPasses();
/// Add passes to lower exception handling for the code generator.
void addPassesToHandleExceptions();
/// Add pass to prepare the LLVM IR for code generation. This should be done
/// before exception handling preparation passes.
virtual void addCodeGenPrepare();
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
virtual void addISelPrepare();
/// addInstSelector - This method should install an instruction selector pass,
/// which converts from LLVM code to machine instructions.
virtual bool addInstSelector() {
return true;
}
/// Add the complete, standard set of LLVM CodeGen passes.
/// Fully developed targets will not generally override this.
virtual void addMachinePasses();
/// Create an instance of ScheduleDAGInstrs to be run within the standard
/// MachineScheduler pass for this function and target at the current
/// optimization level.
///
/// This can also be used to plug a new MachineSchedStrategy into an instance
/// of the standard ScheduleDAGMI:
/// return new ScheduleDAGMI(C, make_unique<MyStrategy>(C), /* IsPostRA= */false)
///
/// Return NULL to select the default (generic) machine scheduler.
virtual ScheduleDAGInstrs *
createMachineScheduler(MachineSchedContext *C) const {
return nullptr;
}
/// Similar to createMachineScheduler but used when postRA machine scheduling
/// is enabled.
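  ///
  /// Example (illustrative sketch; MyPostRAStrategy is hypothetical), in the
  /// same spirit as the pre-RA hook above:
  ///   return new ScheduleDAGMI(C, make_unique<MyPostRAStrategy>(C),
  ///                            /*IsPostRA=*/true);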
virtual ScheduleDAGInstrs *
createPostMachineScheduler(MachineSchedContext *C) const {
return nullptr;
}
protected:
// Helper to verify the analysis is really immutable.
void setOpt(bool &Opt, bool Val);
/// Methods with trivial inline returns are convenient points in the common
/// codegen pass pipeline where targets may insert passes. Methods with
/// out-of-line standard implementations are major CodeGen stages called by
/// addMachinePasses. Some targets may override major stages when inserting
  /// passes is insufficient, but maintaining overridden stages is more work.
///
  /// addPreISel - This method should add any "last minute" LLVM->LLVM
  /// passes (which are run just before the instruction selector).
virtual bool addPreISel() {
return true;
}
/// addMachineSSAOptimization - Add standard passes that optimize machine
/// instructions in SSA form.
virtual void addMachineSSAOptimization();
/// Add passes that optimize instruction level parallelism for out-of-order
/// targets. These passes are run while the machine code is still in SSA
/// form, so they can use MachineTraceMetrics to control their heuristics.
///
/// All passes added here should preserve the MachineDominatorTree,
/// MachineLoopInfo, and MachineTraceMetrics analyses.
virtual bool addILPOpts() {
return false;
}
/// This method may be implemented by targets that want to run passes
/// immediately before register allocation.
virtual void addPreRegAlloc() { }
/// createTargetRegisterAllocator - Create the register allocator pass for
/// this target at the current optimization level.
virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
/// addFastRegAlloc - Add the minimum set of target-independent passes that
/// are required for fast register allocation.
virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
/// addOptimizedRegAlloc - Add passes related to register allocation.
/// LLVMTargetMachine provides standard regalloc passes for most targets.
virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
/// addPreRewrite - Add passes to the optimized register allocation pipeline
/// after register allocation is complete, but before virtual registers are
/// rewritten to physical registers.
///
/// These passes must preserve VirtRegMap and LiveIntervals, and when running
/// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
/// When these passes run, VirtRegMap contains legal physreg assignments for
/// all virtual registers.
virtual bool addPreRewrite() {
return false;
}
/// This method may be implemented by targets that want to run passes after
/// register allocation pass pipeline but before prolog-epilog insertion.
virtual void addPostRegAlloc() { }
/// Add passes that optimize machine instructions after register allocation.
virtual void addMachineLateOptimization();
/// This method may be implemented by targets that want to run passes after
/// prolog-epilog insertion and before the second instruction scheduling pass.
virtual void addPreSched2() { }
/// addGCPasses - Add late codegen passes that analyze code for garbage
/// collection. This should return true if GC info should be printed after
/// these passes.
virtual bool addGCPasses();
/// Add standard basic block placement passes.
virtual void addBlockPlacement();
/// This pass may be implemented by targets that want to run passes
/// immediately before machine code is emitted.
virtual void addPreEmitPass() { }
/// Utilities for targets to add passes to the pass manager.
///
/// Add a CodeGen pass at this point in the pipeline after checking overrides.
/// Return the pass that was added, or zero if no pass was added.
  /// @p printAfter if true and a machine function pass is being added, add an
  /// extra machine printer pass afterwards.
  /// @p verifyAfter if true and a machine function pass is being added, add an
  /// extra machine verification pass afterwards.
AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true,
bool printAfter = true);
/// Add a pass to the PassManager if that pass is supposed to be run, as
/// determined by the StartAfter and StopAfter options. Takes ownership of the
/// pass.
  /// @p printAfter if true and a machine function pass is being added, add an
  /// extra machine printer pass afterwards.
  /// @p verifyAfter if true and a machine function pass is being added, add an
  /// extra machine verification pass afterwards.
void addPass(Pass *P, bool verifyAfter = true, bool printAfter = true);
  /// addMachinePasses helper to create the target-selected or overridden
/// regalloc pass.
FunctionPass *createRegAllocPass(bool Optimized);
/// printAndVerify - Add a pass to dump then verify the machine function, if
/// those steps are enabled.
///
void printAndVerify(const std::string &Banner);
/// Add a pass to print the machine function if printing is enabled.
void addPrintPass(const std::string &Banner);
/// Add a pass to perform basic verification of the machine function if
/// verification is enabled.
void addVerifyPass(const std::string &Banner);
};
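// Example (illustrative sketch; all "My" names are hypothetical): a minimal
// target pass configuration overriding the common hooks.
//   class MyPassConfig : public TargetPassConfig {
//   public:
//     MyPassConfig(MyTargetMachine *TM, PassManagerBase &PM)
//         : TargetPassConfig(TM, PM) {}
//     bool addInstSelector() override {
//       addPass(createMyISelDag(getTM<MyTargetMachine>()));
//       return false; // selector installed successfully
//     }
//     void addPreEmitPass() override { addPass(&MyBranchRelaxationID); }
//   };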
} // namespace llvm
/// List of target independent CodeGen pass IDs.
namespace llvm {
FunctionPass *createAtomicExpandPass(const TargetMachine *TM);
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
/// work well with unreachable basic blocks (what live ranges make sense for a
/// block that cannot be reached?). As such, a code generator should either
/// not instruction select unreachable blocks, or run this pass as its
/// last LLVM modifying pass to clean up blocks that are not reachable from
/// the entry block.
FunctionPass *createUnreachableBlockEliminationPass();
/// MachineFunctionPrinter pass - This pass prints out the machine function to
/// the given stream as a debugging tool.
MachineFunctionPass *
createMachineFunctionPrinterPass(raw_ostream &OS,
const std::string &Banner ="");
/// MIRPrinting pass - This pass prints out the LLVM IR into the given stream
/// using the MIR serialization format.
MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
/// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg or
/// load-linked/store-conditional loops.
extern char &AtomicExpandID;
/// MachineLoopInfo - This pass is a loop analysis pass.
extern char &MachineLoopInfoID;
/// MachineDominators - This pass is a machine dominators analysis pass.
extern char &MachineDominatorsID;
/// MachineDominanceFrontier - This pass is a machine dominance frontier
/// analysis pass.
extern char &MachineDominanceFrontierID;
/// EdgeBundles analysis - Bundle machine CFG edges.
extern char &EdgeBundlesID;
/// LiveVariables pass - This pass computes the set of blocks in which each
/// variable is live and sets machine operand kill flags.
extern char &LiveVariablesID;
/// PHIElimination - This pass eliminates machine instruction PHI nodes
/// by inserting copy instructions. This destroys SSA information, but is the
/// desired input for some register allocators. This pass is "required" by
/// such register allocators like this: AU.addRequiredID(PHIEliminationID);
extern char &PHIEliminationID;
/// LiveIntervals - This analysis keeps track of the live ranges of virtual
/// and physical registers.
extern char &LiveIntervalsID;
/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
extern char &LiveStacksID;
/// TwoAddressInstruction - This pass reduces two-address instructions to
/// use two operands. This destroys SSA information but it is desired by
/// register allocators.
extern char &TwoAddressInstructionPassID;
/// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
extern char &ProcessImplicitDefsID;
/// RegisterCoalescer - This pass merges live ranges to eliminate copies.
extern char &RegisterCoalescerID;
/// MachineScheduler - This pass schedules machine instructions.
extern char &MachineSchedulerID;
/// PostMachineScheduler - This pass schedules machine instructions postRA.
extern char &PostMachineSchedulerID;
/// SpillPlacement analysis. Suggest optimal placement of spill code between
/// basic blocks.
extern char &SpillPlacementID;
/// ShrinkWrap pass. Look for the best place to insert save and restore
/// instructions and update the MachineFunctionInfo with that information.
extern char &ShrinkWrapID;
/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
/// assigned in VirtRegMap.
extern char &VirtRegRewriterID;
/// UnreachableMachineBlockElimination - This pass removes unreachable
/// machine basic blocks.
extern char &UnreachableMachineBlockElimID;
/// DeadMachineInstructionElim - This pass removes dead machine instructions.
extern char &DeadMachineInstructionElimID;
/// FastRegisterAllocation Pass - This pass register allocates as fast as
/// possible. It is best suited for debug code where live ranges are short.
///
FunctionPass *createFastRegisterAllocator();
/// BasicRegisterAllocation Pass - This pass implements a degenerate global
/// register allocator using the basic regalloc framework.
///
FunctionPass *createBasicRegisterAllocator();
/// Greedy register allocation pass - This pass implements a global register
/// allocator for optimized builds.
///
FunctionPass *createGreedyRegisterAllocator();
/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
/// Quadratic Programming (PBQP) based register allocator.
///
FunctionPass *createDefaultPBQPRegisterAllocator();
/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
/// and eliminates abstract frame references.
extern char &PrologEpilogCodeInserterID;
/// ExpandPostRAPseudos - This pass expands pseudo instructions after
/// register allocation.
extern char &ExpandPostRAPseudosID;
/// createPostRAScheduler - This pass performs post register allocation
/// scheduling.
extern char &PostRASchedulerID;
/// BranchFolding - This pass performs machine code CFG based
/// optimizations to delete branches to branches, eliminate branches to
/// successor blocks (creating fall throughs), and eliminate branches over
/// branches.
extern char &BranchFolderPassID;
/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
extern char &MachineFunctionPrinterPassID;
/// MIRPrintingPass - This pass prints out the LLVM IR using the MIR
/// serialization format.
extern char &MIRPrintingPassID;
/// TailDuplicate - Duplicate blocks with unconditional branches
/// into tails of their predecessors.
extern char &TailDuplicateID;
/// MachineTraceMetrics - This pass computes critical path and CPU resource
/// usage in an ensemble of traces.
extern char &MachineTraceMetricsID;
/// EarlyIfConverter - This pass performs if-conversion on SSA form by
/// inserting cmov instructions.
extern char &EarlyIfConverterID;
/// This pass performs instruction combining using trace metrics to estimate
/// critical-path and resource depth.
extern char &MachineCombinerID;
/// StackColoring - This pass performs stack coloring and merging.
/// It merges disjoint allocas to reduce the stack size.
extern char &StackColoringID;
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;
FunctionPass *createIfConverter(std::function<bool(const Function &)> Ftor);
/// MachineBlockPlacement - This pass places basic blocks based on branch
/// probabilities.
extern char &MachineBlockPlacementID;
/// MachineBlockPlacementStats - This pass collects statistics about the
/// basic block placement using branch probabilities and block frequency
/// information.
extern char &MachineBlockPlacementStatsID;
/// GCLowering Pass - Used by gc.root to perform its default lowering
/// operations.
FunctionPass *createGCLoweringPass();
/// ShadowStackGCLowering - Implements the custom lowering mechanism
/// used by the shadow stack GC. Only runs on functions which opt in to
/// the shadow stack collector.
FunctionPass *createShadowStackGCLoweringPass();
/// GCMachineCodeAnalysis - Target-independent pass to mark safe points
/// in machine code. Must be added very late during code generation, just
/// prior to output, and importantly after all CFG transformations (such as
/// branch folding).
extern char &GCMachineCodeAnalysisID;
/// Creates a pass to print GC metadata.
///
FunctionPass *createGCInfoPrinter(raw_ostream &OS);
/// MachineCSE - This pass performs global CSE on machine instructions.
extern char &MachineCSEID;
/// ImplicitNullChecks - This pass folds null pointer checks into nearby
/// memory operations.
extern char &ImplicitNullChecksID;
/// MachineLICM - This pass performs LICM on machine instructions.
extern char &MachineLICMID;
/// MachineSinking - This pass performs sinking on machine instructions.
extern char &MachineSinkingID;
/// MachineCopyPropagation - This pass performs copy propagation on
/// machine instructions.
extern char &MachineCopyPropagationID;
/// PeepholeOptimizer - This pass performs peephole optimizations -
/// like extension and comparison eliminations.
extern char &PeepholeOptimizerID;
/// OptimizePHIs - This pass optimizes machine instruction PHIs
/// to take advantage of opportunities created during DAG legalization.
extern char &OptimizePHIsID;
/// StackSlotColoring - This pass performs stack slot coloring.
extern char &StackSlotColoringID;
/// createStackProtectorPass - This pass adds stack protectors to functions.
///
FunctionPass *createStackProtectorPass(const TargetMachine *TM);
/// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
///
FunctionPass *createMachineVerifierPass(const std::string& Banner);
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
FunctionPass *createDwarfEHPass(const TargetMachine *TM);
/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
/// in addition to the Itanium LSDA based personalities.
FunctionPass *createWinEHPass(const TargetMachine *TM);
/// createSjLjEHPreparePass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
///
FunctionPass *createSjLjEHPreparePass();
/// LocalStackSlotAllocation - This pass assigns local frame indices to stack
/// slots relative to one another and allocates base registers to access them
/// when it is estimated by the target to be out of range of normal frame
/// pointer or stack pointer index addressing.
extern char &LocalStackSlotAllocationID;
/// ExpandISelPseudos - This pass expands pseudo-instructions.
extern char &ExpandISelPseudosID;
/// createExecutionDependencyFixPass - This pass fixes execution time
/// problems with dependent instructions, such as switching execution
/// domains to match.
///
/// The pass will examine instructions using and defining registers in RC.
///
FunctionPass *createExecutionDependencyFixPass(const TargetRegisterClass *RC);
/// UnpackMachineBundles - This pass unpacks machine instruction bundles.
extern char &UnpackMachineBundlesID;
FunctionPass *
createUnpackMachineBundles(std::function<bool(const Function &)> Ftor);
/// FinalizeMachineBundles - This pass finalizes machine instruction
/// bundles (created earlier, e.g. during pre-RA scheduling).
extern char &FinalizeMachineBundlesID;
/// StackMapLiveness - This pass analyses the register live-out set of
/// stackmap/patchpoint intrinsics and attaches the calculated information to
/// the intrinsic for later emission to the StackMap.
extern char &StackMapLivenessID;
/// createJumpInstrTablesPass - This pass creates jump-instruction tables.
ModulePass *createJumpInstrTablesPass();
/// createForwardControlFlowIntegrityPass - This pass adds control-flow
/// integrity.
ModulePass *createForwardControlFlowIntegrityPass();
/// InterleavedAccess Pass - This pass identifies and matches interleaved
/// memory accesses to target specific intrinsics.
///
FunctionPass *createInterleavedAccessPass(const TargetMachine *TM);
} // End llvm namespace
/// Target machine pass initializer for passes with dependencies. Use with
/// INITIALIZE_TM_PASS_END.
#define INITIALIZE_TM_PASS_BEGIN INITIALIZE_PASS_BEGIN
/// Target machine pass initializer for passes with dependencies. Use with
/// INITIALIZE_TM_PASS_BEGIN.
#define INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis) \
PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis, \
PassInfo::TargetMachineCtor_t(callTargetMachineCtor< passName >)); \
Registry.registerPass(*PI, true); \
return PI; \
} \
void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
}
/// This initializer registers TargetMachine constructor, so the pass being
/// initialized can use target dependent interfaces. Please do not move this
/// macro to be together with INITIALIZE_PASS, which is a complete target
/// independent initializer, and we don't want to make libScalarOpts depend
/// on libCodeGen.
#define INITIALIZE_TM_PASS(passName, arg, name, cfg, analysis) \
INITIALIZE_TM_PASS_BEGIN(passName, arg, name, cfg, analysis) \
INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis)
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineBlockFrequencyInfo.h | //===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h"
#include <climits>
namespace llvm {
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
template <class BlockT> class BlockFrequencyInfoImpl;
/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
/// to estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {
typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType;
std::unique_ptr<ImplType> MBFI;
public:
static char ID;
MachineBlockFrequencyInfo();
~MachineBlockFrequencyInfo() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &F) override;
void releaseMemory() override;
  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
  /// information. Please note that the initial frequency is equal to 1024. This
  /// means we should not rely on the value itself, but only on comparisons to
  /// the other block frequencies. We do this to avoid using floating point.
///
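  /// Example (illustrative sketch; MBFI and MBB are hypothetical): block
  /// frequencies are only meaningful relative to getEntryFreq().
  ///   BlockFrequency Freq = MBFI.getBlockFreq(MBB);
  ///   bool Hot = Freq.getFrequency() >= MBFI.getEntryFreq();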
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
const MachineFunction *getFunction() const;
void view() const;
  // Print the block frequency Freq to OS using the current function's entry
  // frequency to convert Freq into a relative decimal form.
raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
// Convenience method that attempts to look up the frequency associated with
// BB and print it to OS.
raw_ostream &printBlockFreq(raw_ostream &OS,
const MachineBasicBlock *MBB) const;
uint64_t getEntryFreq() const;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/DAGCombine.h | //===-- llvm/CodeGen/DAGCombine.h ------- SelectionDAG Nodes ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_CODEGEN_DAGCOMBINE_H
#define LLVM_CODEGEN_DAGCOMBINE_H
namespace llvm {
enum CombineLevel {
BeforeLegalizeTypes,
AfterLegalizeTypes,
AfterLegalizeVectorOps,
AfterLegalizeDAG
};
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineFunction.h | //===-- llvm/CodeGen/MachineFunction.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code for a function. This class contains a list of
// MachineBasicBlock instances that make up the current compiled function.
//
// This class also contains pointers to various classes which hold
// target-specific information about the generated code.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
#define LLVM_CODEGEN_MACHINEFUNCTION_H
#include "llvm/ADT/ilist.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Recycler.h"
namespace llvm {
class Value;
class Function;
class GCModuleInfo;
class MachineRegisterInfo;
class MachineFrameInfo;
class MachineConstantPool;
class MachineJumpTableInfo;
class MachineModuleInfo;
class MCContext;
class Pass;
class TargetMachine;
class TargetSubtargetInfo;
class TargetRegisterClass;
struct MachinePointerInfo;
template <>
struct ilist_traits<MachineBasicBlock>
: public ilist_default_traits<MachineBasicBlock> {
mutable ilist_half_node<MachineBasicBlock> Sentinel;
public:
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
MachineBasicBlock *
createSentinel() const {
return static_cast<MachineBasicBlock*>(&Sentinel);
}
void destroySentinel(MachineBasicBlock *) const {}
MachineBasicBlock *provideInitialHead() const { return createSentinel(); }
MachineBasicBlock *ensureHead(MachineBasicBlock*) const {
return createSentinel();
}
static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {}
void addNodeToList(MachineBasicBlock* MBB);
void removeNodeFromList(MachineBasicBlock* MBB);
void deleteNode(MachineBasicBlock *MBB);
private:
void createNode(const MachineBasicBlock &);
};
/// MachineFunctionInfo - This class can be derived from and used by targets to
/// hold private target-specific information for each MachineFunction. Objects
/// of this type are accessed/created with MF::getInfo and destroyed when the
/// MachineFunction is destroyed.
struct MachineFunctionInfo {
virtual ~MachineFunctionInfo();
/// \brief Factory function: default behavior is to call new using the
/// supplied allocator.
///
  /// This function can be overridden in a derived class.
template<typename Ty>
static Ty *create(BumpPtrAllocator &Allocator, MachineFunction &MF) {
return new (Allocator.Allocate<Ty>()) Ty(MF);
}
};
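// Example (illustrative sketch; MyFunctionInfo is hypothetical): targets
// derive from MachineFunctionInfo and retrieve it lazily via MF::getInfo,
// which calls the create<Ty> factory above on first use.
//   struct MyFunctionInfo : MachineFunctionInfo {
//     unsigned VarArgsFrameIndex;
//     explicit MyFunctionInfo(MachineFunction &MF) : VarArgsFrameIndex(0) {}
//   };
//   MyFunctionInfo *FI = MF.getInfo<MyFunctionInfo>();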
class MachineFunction {
const Function *Fn;
const TargetMachine &Target;
const TargetSubtargetInfo *STI;
MCContext &Ctx;
MachineModuleInfo &MMI;
// RegInfo - Information about each register in use in the function.
MachineRegisterInfo *RegInfo;
// Used to keep track of target-specific per-machine function information for
// the target implementation.
MachineFunctionInfo *MFInfo;
// Keep track of objects allocated on the stack.
MachineFrameInfo *FrameInfo;
// Keep track of constants which are spilled to memory
MachineConstantPool *ConstantPool;
// Keep track of jump tables for switch instructions
MachineJumpTableInfo *JumpTableInfo;
// Function-level unique numbering for MachineBasicBlocks. When a
  // MachineBasicBlock is inserted into a MachineFunction, it is automatically
  // numbered, and this vector keeps track of the mapping from IDs to MBBs.
std::vector<MachineBasicBlock*> MBBNumbering;
// Pool-allocate MachineFunction-lifetime and IR objects.
BumpPtrAllocator Allocator;
// Allocation management for instructions in function.
Recycler<MachineInstr> InstructionRecycler;
// Allocation management for operand arrays on instructions.
ArrayRecycler<MachineOperand> OperandRecycler;
// Allocation management for basic blocks in function.
Recycler<MachineBasicBlock> BasicBlockRecycler;
// List of machine basic blocks in function
typedef ilist<MachineBasicBlock> BasicBlockListType;
BasicBlockListType BasicBlocks;
/// FunctionNumber - This provides a unique ID for each function emitted in
/// this translation unit.
///
unsigned FunctionNumber;
/// Alignment - The alignment of the function.
unsigned Alignment;
/// ExposesReturnsTwice - True if the function calls setjmp or related
/// functions with attribute "returns twice", but doesn't have
/// the attribute itself.
/// This is used to limit optimizations which cannot reason
/// about the control flow of such functions.
bool ExposesReturnsTwice;
/// True if the function includes any inline assembly.
bool HasInlineAsm;
MachineFunction(const MachineFunction &) = delete;
void operator=(const MachineFunction&) = delete;
public:
MachineFunction(const Function *Fn, const TargetMachine &TM,
unsigned FunctionNum, MachineModuleInfo &MMI);
~MachineFunction();
MachineModuleInfo &getMMI() const { return MMI; }
MCContext &getContext() const { return Ctx; }
/// Return the DataLayout attached to the Module associated to this MF.
const DataLayout &getDataLayout() const;
/// getFunction - Return the LLVM function that this machine code represents
///
const Function *getFunction() const { return Fn; }
/// getName - Return the name of the corresponding LLVM function.
///
StringRef getName() const;
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const { return FunctionNumber; }
/// getTarget - Return the target machine this machine code is compiled with
///
const TargetMachine &getTarget() const { return Target; }
/// getSubtarget - Return the subtarget for which this machine code is being
/// compiled.
const TargetSubtargetInfo &getSubtarget() const { return *STI; }
void setSubtarget(const TargetSubtargetInfo *ST) { STI = ST; }
/// getSubtarget - This method returns a pointer to the specified type of
/// TargetSubtargetInfo. In debug builds, it verifies that the object being
/// returned is of the correct type.
template<typename STC> const STC &getSubtarget() const {
return *static_cast<const STC *>(STI);
}
/// getRegInfo - Return information about the registers currently in use.
///
MachineRegisterInfo &getRegInfo() { return *RegInfo; }
const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
/// getFrameInfo - Return the frame info object for the current function.
/// This object contains information about objects allocated on the stack
/// frame of the current function in an abstract way.
///
MachineFrameInfo *getFrameInfo() { return FrameInfo; }
const MachineFrameInfo *getFrameInfo() const { return FrameInfo; }
/// getJumpTableInfo - Return the jump table info object for the current
/// function. This object contains information about jump tables in the
/// current function. If the current function has no jump tables, this will
/// return null.
const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
  /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
  /// does not already exist, allocate one.
MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
/// getConstantPool - Return the constant pool object for the current
/// function.
///
MachineConstantPool *getConstantPool() { return ConstantPool; }
const MachineConstantPool *getConstantPool() const { return ConstantPool; }
/// getAlignment - Return the alignment (log2, not bytes) of the function.
///
unsigned getAlignment() const { return Alignment; }
/// setAlignment - Set the alignment (log2, not bytes) of the function.
///
void setAlignment(unsigned A) { Alignment = A; }
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
void ensureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}
/// exposesReturnsTwice - Returns true if the function calls setjmp or
/// any other similar functions with attribute "returns twice" without
/// having the attribute itself.
bool exposesReturnsTwice() const {
return ExposesReturnsTwice;
}
/// setCallsSetJmp - Set a flag that indicates if there's a call to
/// a "returns twice" function.
void setExposesReturnsTwice(bool B) {
ExposesReturnsTwice = B;
}
/// Returns true if the function contains any inline assembly.
bool hasInlineAsm() const {
return HasInlineAsm;
}
/// Set a flag that indicates that the function contains inline assembly.
void setHasInlineAsm(bool B) {
HasInlineAsm = B;
}
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
///
template<typename Ty>
Ty *getInfo() {
if (!MFInfo)
MFInfo = Ty::template create<Ty>(Allocator, *this);
return static_cast<Ty*>(MFInfo);
}
template<typename Ty>
const Ty *getInfo() const {
return const_cast<MachineFunction*>(this)->getInfo<Ty>();
}
/// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
/// are inserted into the machine function. The block number for a machine
  /// basic block can be found by using the MBB::getNumber method; this
  /// method provides the inverse mapping.
///
MachineBasicBlock *getBlockNumbered(unsigned N) const {
assert(N < MBBNumbering.size() && "Illegal block number");
assert(MBBNumbering[N] && "Block was removed from the machine function!");
return MBBNumbering[N];
}
  /// Return true if segmented stacks should be emitted for this function.
bool shouldSplitStack();
/// getNumBlockIDs - Return the number of MBB ID's allocated.
///
unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
/// recomputes them. This guarantees that the MBB numbers are sequential,
/// dense, and match the ordering of the blocks within the function. If a
/// specific MachineBasicBlock is specified, only that block and those after
/// it are renumbered.
void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
/// print - Print out the MachineFunction in a format suitable for debugging
/// to the specified stream.
///
void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
/// program, displaying the CFG of the current function with the code for each
/// basic block inside. This depends on there being a 'dot' and 'gv' program
/// in your path.
///
void viewCFG() const;
/// viewCFGOnly - This function is meant for use from the debugger. It works
/// just like viewCFG, but it does not include the contents of basic blocks
/// into the nodes, just the label. If you are only interested in the CFG
/// this can make the graph smaller.
///
void viewCFGOnly() const;
/// dump - Print the current MachineFunction to cerr, useful for debugger use.
///
void dump() const;
/// verify - Run the current MachineFunction through the machine code
/// verifier, useful for debugger use.
void verify(Pass *p = nullptr, const char *Banner = nullptr) const;
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
typedef BasicBlockListType::const_iterator const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
/// addLiveIn - Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
//===--------------------------------------------------------------------===//
// BasicBlock accessor functions.
//
iterator begin() { return BasicBlocks.begin(); }
const_iterator begin() const { return BasicBlocks.begin(); }
iterator end () { return BasicBlocks.end(); }
const_iterator end () const { return BasicBlocks.end(); }
reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
reverse_iterator rend () { return BasicBlocks.rend(); }
const_reverse_iterator rend () const { return BasicBlocks.rend(); }
unsigned size() const { return (unsigned)BasicBlocks.size();}
bool empty() const { return BasicBlocks.empty(); }
const MachineBasicBlock &front() const { return BasicBlocks.front(); }
MachineBasicBlock &front() { return BasicBlocks.front(); }
const MachineBasicBlock & back() const { return BasicBlocks.back(); }
MachineBasicBlock & back() { return BasicBlocks.back(); }
void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
void insert(iterator MBBI, MachineBasicBlock *MBB) {
BasicBlocks.insert(MBBI, MBB);
}
void splice(iterator InsertPt, iterator MBBI) {
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
}
void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
}
void remove(iterator MBBI) {
BasicBlocks.remove(MBBI);
}
void erase(iterator MBBI) {
BasicBlocks.erase(MBBI);
}
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
//
/// \brief Adds the MBB to the internal numbering. Returns the unique number
/// assigned to the MBB.
///
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
MBBNumbering.push_back(MBB);
return (unsigned)MBBNumbering.size()-1;
}
/// removeFromMBBNumbering - Remove the specific machine basic block from our
  /// tracker; this is only really to be used by the MachineBasicBlock
/// implementation.
void removeFromMBBNumbering(unsigned N) {
assert(N < MBBNumbering.size() && "Illegal basic block #");
MBBNumbering[N] = nullptr;
}
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
///
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL,
bool NoImp = false);
/// CloneMachineInstr - Create a new MachineInstr which is a copy of the
/// 'Orig' instruction, identical in all ways except the instruction
/// has no parent, prev, or next.
///
/// See also TargetInstrInfo::duplicate() for target-specific fixes to cloned
/// instructions.
MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
/// DeleteMachineInstr - Delete the given MachineInstr.
///
void DeleteMachineInstr(MachineInstr *MI);
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
///
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
///
void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
unsigned f, uint64_t s,
unsigned base_alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size);
typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity;
/// Allocate an array of MachineOperands. This is only intended for use by
/// internal MachineInstr functions.
MachineOperand *allocateOperandArray(OperandCapacity Cap) {
return OperandRecycler.allocate(Cap, Allocator);
}
  /// Deallocate an array of MachineOperands and recycle the memory. This is
/// only intended for use by internal MachineInstr functions.
/// Cap must be the same capacity that was used to allocate the array.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
OperandRecycler.deallocate(Cap, Array);
}
/// \brief Allocate and initialize a register mask with @p NumRegister bits.
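  /// For example, NumRegister = 40 yields Size = (40 + 31) / 32 = 2
  /// zero-initialized 32-bit words, i.e. room for 64 mask bits.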
uint32_t *allocateRegisterMask(unsigned NumRegister) {
unsigned Size = (NumRegister + 31) / 32;
uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
for (unsigned i = 0; i != Size; ++i)
Mask[i] = 0;
return Mask;
}
/// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
/// pointers. This array is owned by the MachineFunction.
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
/// extractLoadMemRefs - Allocate an array and populate it with just the
/// load information from the given MachineMemOperand sequence.
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator>
extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
MachineInstr::mmo_iterator End);
/// extractStoreMemRefs - Allocate an array and populate it with just the
/// store information from the given MachineMemOperand sequence.
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator>
extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
MachineInstr::mmo_iterator End);
//===--------------------------------------------------------------------===//
// Label Manipulation.
//
/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate = false) const;
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *getPICBaseSymbol() const;
};
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a
// machine function as a graph of machine basic blocks... these are
// the same as the machine basic block iterators, except that the root
// node is implicitly the first node of the function.
//
template <> struct GraphTraits<MachineFunction*> :
public GraphTraits<MachineBasicBlock*> {
static NodeType *getEntryNode(MachineFunction *F) {
return &F->front();
}
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef MachineFunction::iterator nodes_iterator;
static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
static unsigned size (MachineFunction *F) { return F->size(); }
};
template <> struct GraphTraits<const MachineFunction*> :
public GraphTraits<const MachineBasicBlock*> {
static NodeType *getEntryNode(const MachineFunction *F) {
return &F->front();
}
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef MachineFunction::const_iterator nodes_iterator;
static nodes_iterator nodes_begin(const MachineFunction *F) {
return F->begin();
}
static nodes_iterator nodes_end (const MachineFunction *F) {
return F->end();
}
static unsigned size (const MachineFunction *F) {
return F->size();
}
};
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order. Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineFunction*> > :
public GraphTraits<Inverse<MachineBasicBlock*> > {
static NodeType *getEntryNode(Inverse<MachineFunction*> G) {
return &G.Graph->front();
}
};
template <> struct GraphTraits<Inverse<const MachineFunction*> > :
public GraphTraits<Inverse<const MachineBasicBlock*> > {
static NodeType *getEntryNode(Inverse<const MachineFunction *> G) {
return &G.Graph->front();
}
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/SelectionDAGISel.h | //===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAGISel class, which is used as the common
// base class for SelectionDAG-based instruction selectors.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
#define LLVM_CODEGEN_SELECTIONDAGISEL_H
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
class FastISel;
class SelectionDAGBuilder;
class SDValue;
class MachineRegisterInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class TargetLowering;
class TargetLibraryInfo;
class FunctionLoweringInfo;
class ScheduleHazardRecognizer;
class GCFunctionInfo;
class ScheduleDAGSDNodes;
class LoadInst;
/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
/// pattern-matching instruction selectors.
class SelectionDAGISel : public MachineFunctionPass {
public:
TargetMachine &TM;
const TargetLibraryInfo *LibInfo;
FunctionLoweringInfo *FuncInfo;
MachineFunction *MF;
MachineRegisterInfo *RegInfo;
SelectionDAG *CurDAG;
SelectionDAGBuilder *SDB;
AliasAnalysis *AA;
GCFunctionInfo *GFI;
CodeGenOpt::Level OptLevel;
const TargetInstrInfo *TII;
const TargetLowering *TLI;
static char ID;
explicit SelectionDAGISel(TargetMachine &tm,
CodeGenOpt::Level OL = CodeGenOpt::Default);
~SelectionDAGISel() override;
const TargetLowering *getTargetLowering() const { return TLI; }
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
virtual void EmitFunctionEntryCode() {}
/// PreprocessISelDAG - This hook allows targets to hack on the graph before
/// instruction selection starts.
virtual void PreprocessISelDAG() {}
/// PostprocessISelDAG() - This hook allows the target to hack on the graph
/// right after selection.
virtual void PostprocessISelDAG() {}
/// Select - Main hook targets implement to select a node.
virtual SDNode *Select(SDNode *N) = 0;
/// SelectInlineAsmMemoryOperand - Select the specified address as a target
/// addressing mode, according to the specified constraint. If this does
/// not match or is not implemented, return true. The resultant operands
/// (which will appear in the machine instruction) should be added to the
/// OutOps vector.
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
unsigned ConstraintID,
std::vector<SDValue> &OutOps) {
return true;
}
/// IsProfitableToFold - Returns true if it's profitable to fold the specific
/// operand node N of U during instruction selection that starts at Root.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
  /// FIXME: This is a static member function because the MSP430 and X86
  /// targets use it during isel. This could become a proper member.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
CodeGenOpt::Level OptLevel,
bool IgnoreChains = false);
// Opcodes used by the DAG state machine:
enum BuiltinOpcodes {
OPC_Scope,
OPC_RecordNode,
OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
OPC_RecordMemRef,
OPC_CaptureGlueInput,
OPC_MoveChild,
OPC_MoveParent,
OPC_CheckSame,
OPC_CheckChild0Same, OPC_CheckChild1Same,
OPC_CheckChild2Same, OPC_CheckChild3Same,
OPC_CheckPatternPredicate,
OPC_CheckPredicate,
OPC_CheckOpcode,
OPC_SwitchOpcode,
OPC_CheckType,
OPC_SwitchType,
OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
OPC_CheckChild6Type, OPC_CheckChild7Type,
OPC_CheckInteger,
OPC_CheckChild0Integer, OPC_CheckChild1Integer, OPC_CheckChild2Integer,
OPC_CheckChild3Integer, OPC_CheckChild4Integer,
OPC_CheckCondCode,
OPC_CheckValueType,
OPC_CheckComplexPat,
OPC_CheckAndImm, OPC_CheckOrImm,
OPC_CheckFoldableChainNode,
OPC_EmitInteger,
OPC_EmitRegister,
OPC_EmitRegister2,
OPC_EmitConvertToTarget,
OPC_EmitMergeInputChains,
OPC_EmitMergeInputChains1_0,
OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg,
OPC_EmitNodeXForm,
OPC_EmitNode,
OPC_MorphNodeTo,
OPC_MarkGlueResults,
OPC_CompleteMatch
};
enum {
OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
OPFL_Chain = 1, // Node has a chain input.
OPFL_GlueInput = 2, // Node has a glue input.
OPFL_GlueOutput = 4, // Node has a glue output.
OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
OPFL_Variadic0 = 1<<4, // Node is variadic, root has 0 fixed inputs.
    OPFL_Variadic1 = 2<<4, // Node is variadic, root has 1 fixed input.
OPFL_Variadic2 = 3<<4, // Node is variadic, root has 2 fixed inputs.
OPFL_Variadic3 = 4<<4, // Node is variadic, root has 3 fixed inputs.
OPFL_Variadic4 = 5<<4, // Node is variadic, root has 4 fixed inputs.
OPFL_Variadic5 = 6<<4, // Node is variadic, root has 5 fixed inputs.
OPFL_Variadic6 = 7<<4, // Node is variadic, root has 6 fixed inputs.
OPFL_VariadicInfo = OPFL_Variadic6
};
/// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
/// number of fixed arity values that should be skipped when copying from the
/// root.
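  /// For example, a flags word of (OPFL_Chain | OPFL_Variadic2) has 3<<4 in
  /// the variadic field, so this returns (3 - 1) = 2 fixed inputs.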
static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
return ((Flags&OPFL_VariadicInfo) >> 4)-1;
}
protected:
/// DAGSize - Size of DAG being instruction selected.
///
unsigned DAGSize;
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDValue F, SDValue T) {
CurDAG->ReplaceAllUsesOfValueWith(F, T);
}
/// ReplaceUses - replace all uses of the old nodes F with the use
/// of the new nodes T.
void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
}
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDNode *F, SDNode *T) {
CurDAG->ReplaceAllUsesWith(F, T);
}
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL);
public:
// Calls to these predicates are generated by tblgen.
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const;
bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const;
/// CheckPatternPredicate - This function is generated by tblgen in the
/// target. It runs the specified pattern predicate and returns true if it
/// succeeds or false if it fails. The number is a private implementation
/// detail to the code tblgen produces.
virtual bool CheckPatternPredicate(unsigned PredNo) const {
llvm_unreachable("Tblgen should generate the implementation of this!");
}
/// CheckNodePredicate - This function is generated by tblgen in the target.
/// It runs node predicate number PredNo and returns true if it succeeds or
/// false if it fails. The number is a private implementation
/// detail to the code tblgen produces.
virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
llvm_unreachable("Tblgen should generate the implementation of this!");
}
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
unsigned PatternNo,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
llvm_unreachable("Tblgen should generate the implementation of this!");
}
virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
llvm_unreachable("Tblgen should generate this!");
}
SDNode *SelectCodeCommon(SDNode *NodeToMatch,
const unsigned char *MatcherTable,
unsigned TableSize);
/// \brief Return true if complex patterns for this target can mutate the
/// DAG.
virtual bool ComplexPatternFuncMutatesDAG() const {
return false;
}
private:
// Calls to these functions are generated by tblgen.
SDNode *Select_INLINEASM(SDNode *N);
SDNode *Select_READ_REGISTER(SDNode *N);
SDNode *Select_WRITE_REGISTER(SDNode *N);
SDNode *Select_UNDEF(SDNode *N);
void CannotYetSelect(SDNode *N);
private:
void DoInstructionSelection();
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
/// Prepares the landing pad to take incoming values or do other EH
/// personality specific tasks. Returns true if the block should be
/// instruction selected, false if no code should be emitted for it.
bool PrepareEHLandingPad();
/// \brief Perform instruction selection on all basic blocks in the function.
void SelectAllBasicBlocks(const Function &Fn);
/// \brief Perform instruction selection on a single basic block, for
/// instructions between \p Begin and \p End. \p HadTailCall will be set
/// to true if a call in the block was translated as a tail call.
void SelectBasicBlock(BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall);
void FinishBasicBlock();
void CodeGenAndEmitDAG();
/// \brief Generate instructions for lowering the incoming arguments of the
/// given function.
void LowerArguments(const Function &F);
void ComputeLiveOutVRegInfo();
/// Create the scheduler. If a specific scheduler was specified
/// via the SchedulerRegistry, use it, otherwise select the
/// one preferred by the target.
///
ScheduleDAGSDNodes *CreateScheduler();
/// OpcodeOffset - This is a cache used to dispatch efficiently into isel
  /// state machines that start with an OPC_SwitchOpcode node.
std::vector<unsigned> OpcodeOffset;
void UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
const SmallVectorImpl<SDNode*> &ChainNodesMatched,
SDValue InputGlue, const SmallVectorImpl<SDNode*> &F,
bool isMorphNodeTo);
};
}
#endif /* LLVM_CODEGEN_SELECTIONDAGISEL_H */
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/FastISel.h | //===-- FastISel.h - Definition of the FastISel class ---*- C++ -*---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the FastISel class.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_FASTISEL_H
#define LLVM_CODEGEN_FASTISEL_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
/// \brief This is a fast-path instruction selection class that generates poor
/// code and doesn't support illegal types or non-trivial lowering, but runs
/// quickly.
class FastISel {
public:
struct ArgListEntry {
Value *Val;
Type *Ty;
bool IsSExt : 1;
bool IsZExt : 1;
bool IsInReg : 1;
bool IsSRet : 1;
bool IsNest : 1;
bool IsByVal : 1;
bool IsInAlloca : 1;
bool IsReturned : 1;
uint16_t Alignment;
ArgListEntry()
: Val(nullptr), Ty(nullptr), IsSExt(false), IsZExt(false),
IsInReg(false), IsSRet(false), IsNest(false), IsByVal(false),
IsInAlloca(false), IsReturned(false), Alignment(0) {}
/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
};
typedef std::vector<ArgListEntry> ArgListTy;
struct CallLoweringInfo {
Type *RetTy;
bool RetSExt : 1;
bool RetZExt : 1;
bool IsVarArg : 1;
bool IsInReg : 1;
bool DoesNotReturn : 1;
bool IsReturnValueUsed : 1;
// \brief IsTailCall should be modified by implementations of fastLowerCall
// that perform tail call conversions.
bool IsTailCall;
unsigned NumFixedArgs;
CallingConv::ID CallConv;
const Value *Callee;
MCSymbol *Symbol;
ArgListTy Args;
ImmutableCallSite *CS;
MachineInstr *Call;
unsigned ResultReg;
unsigned NumResultRegs;
bool IsPatchPoint;
SmallVector<Value *, 16> OutVals;
SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
SmallVector<unsigned, 16> OutRegs;
SmallVector<ISD::InputArg, 4> Ins;
SmallVector<unsigned, 4> InRegs;
CallLoweringInfo()
: RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
Callee(nullptr), Symbol(nullptr), CS(nullptr), Call(nullptr),
ResultReg(0), NumResultRegs(0), IsPatchPoint(false) {}
CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
const Value *Target, ArgListTy &&ArgsList,
ImmutableCallSite &Call) {
RetTy = ResultTy;
Callee = Target;
IsInReg = Call.paramHasAttr(0, Attribute::InReg);
DoesNotReturn = Call.doesNotReturn();
IsVarArg = FuncTy->isVarArg();
IsReturnValueUsed = !Call.getInstruction()->use_empty();
RetSExt = Call.paramHasAttr(0, Attribute::SExt);
RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
CallConv = Call.getCallingConv();
Args = std::move(ArgsList);
NumFixedArgs = FuncTy->getNumParams();
CS = &Call;
return *this;
}
CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
MCSymbol *Target, ArgListTy &&ArgsList,
ImmutableCallSite &Call,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
Callee = Call.getCalledValue();
Symbol = Target;
IsInReg = Call.paramHasAttr(0, Attribute::InReg);
DoesNotReturn = Call.doesNotReturn();
IsVarArg = FuncTy->isVarArg();
IsReturnValueUsed = !Call.getInstruction()->use_empty();
RetSExt = Call.paramHasAttr(0, Attribute::SExt);
RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
CallConv = Call.getCallingConv();
Args = std::move(ArgsList);
NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;
CS = &Call;
return *this;
}
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
const Value *Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
Callee = Target;
CallConv = CC;
Args = std::move(ArgsList);
NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
return *this;
}
CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
CallingConv::ID CC, Type *ResultTy,
const char *Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U);
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
MCSymbol *Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
Symbol = Target;
CallConv = CC;
Args = std::move(ArgsList);
NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
return *this;
}
CallLoweringInfo &setTailCall(bool Value = true) {
IsTailCall = Value;
return *this;
}
CallLoweringInfo &setIsPatchPoint(bool Value = true) {
IsPatchPoint = Value;
return *this;
}
ArgListTy &getArgs() { return Args; }
void clearOuts() {
OutVals.clear();
OutFlags.clear();
OutRegs.clear();
}
void clearIns() {
Ins.clear();
InRegs.clear();
}
};
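// Illustrative sketch: typical use of CallLoweringInfo when lowering a call
// to a known callee. The variables I, ResultTy, CalleeV, and Args are
// hypothetical.
//
//   CallLoweringInfo CLI;
//   CLI.setCallee(CallingConv::C, ResultTy, CalleeV, std::move(Args))
//      .setTailCall(false);
//   if (!lowerCallTo(CLI))
//     return false; // fall back to SelectionDAG isel
//   updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);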
protected:
DenseMap<const Value *, unsigned> LocalValueMap;
FunctionLoweringInfo &FuncInfo;
MachineFunction *MF;
MachineRegisterInfo &MRI;
MachineFrameInfo &MFI;
MachineConstantPool &MCP;
DebugLoc DbgLoc;
const TargetMachine &TM;
const DataLayout &DL;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
const TargetLibraryInfo *LibInfo;
bool SkipTargetIndependentISel;
/// \brief The position of the last instruction for materializing constants
/// for use in the current block. It resets to EmitStartPt when it makes sense
/// (for example, it's usually profitable to avoid function calls between the
/// definition and the use).
MachineInstr *LastLocalValue;
/// \brief The topmost instruction in the current block that is allowed for
/// emitting local variables. LastLocalValue resets to EmitStartPt when it
/// makes sense (for example, on function calls).
MachineInstr *EmitStartPt;
public:
/// \brief Return the position of the last instruction emitted for
/// materializing constants for use in the current block.
MachineInstr *getLastLocalValue() { return LastLocalValue; }
/// \brief Update the position of the last instruction emitted for
/// materializing constants for use in the current block.
void setLastLocalValue(MachineInstr *I) {
EmitStartPt = I;
LastLocalValue = I;
}
/// \brief Set the current block to which generated machine instructions will
/// be appended, and clear the local CSE map.
void startNewBlock();
/// \brief Return current debug location information.
DebugLoc getCurDebugLoc() const { return DbgLoc; }
/// \brief Do "fast" instruction selection for function arguments and append
/// the machine instructions to the current block. Returns true when
/// successful.
bool lowerArguments();
/// \brief Do "fast" instruction selection for the given LLVM IR instruction
/// and append the generated machine instructions to the current block.
/// Returns true if selection was successful.
bool selectInstruction(const Instruction *I);
/// \brief Do "fast" instruction selection for the given LLVM IR operator
/// (Instruction or ConstantExpr), and append generated machine instructions
/// to the current block. Return true if selection was successful.
bool selectOperator(const User *I, unsigned Opcode);
/// \brief Create a virtual register and arrange for it to be assigned the
/// value for the given LLVM value.
unsigned getRegForValue(const Value *V);
/// \brief Look up the value to see if it is already cached in a register. It
/// may be defined by instructions across blocks or defined locally.
unsigned lookUpRegForValue(const Value *V);
/// \brief This is a wrapper around getRegForValue that also takes care of
/// truncating or sign-extending the given getelementptr index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
/// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
/// that we could have a sequence where multiple LLVM IR instructions are
/// folded into the same machineinstr. For example we could have:
///
/// A: x = load i32 *P
/// B: y = icmp A, 42
/// C: br y, ...
///
/// In this scenario, \p LI is "A", and \p FoldInst is "C". We know about "B"
/// (and any other folded instructions) because it is between A and C.
///
/// If we succeed folding, return true.
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
/// \brief The specified machine instr operand is a vreg, and that vreg is
/// being provided by the specified load instruction. If possible, try to
/// fold the load as an operand to the instruction, returning true on
/// success.
///
/// This method should be implemented by targets.
virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
const LoadInst * /*LI*/) {
return false;
}
/// \brief Reset InsertPt to prepare for inserting instructions into the
/// current block.
void recomputeInsertPt();
/// \brief Remove all dead instructions between I and E.
void removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E);
struct SavePoint {
MachineBasicBlock::iterator InsertPt;
DebugLoc DL;
};
/// \brief Prepare InsertPt to begin inserting instructions into the local
/// value area and return the old insert position.
SavePoint enterLocalValueArea();
/// \brief Reset InsertPt to the given old insert position.
void leaveLocalValueArea(SavePoint Old);
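// Illustrative sketch: one way the save/restore pair above can be used to
// emit into the local value area without disturbing the current insertion
// point. The constant C is hypothetical.
//
//   SavePoint SP = enterLocalValueArea();
//   unsigned CReg = fastMaterializeConstant(C); // target hook, may return 0
//   leaveLocalValueArea(SP);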
virtual ~FastISel();
protected:
explicit FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel = false);
/// \brief This method is called by target-independent code when the normal
/// FastISel process fails to select an instruction. This gives targets a
/// chance to emit code for anything that doesn't fit into FastISel's
/// framework. It returns true if it was successful.
virtual bool fastSelectInstruction(const Instruction *I) = 0;
/// \brief This method is called by target-independent code to do target-
/// specific argument lowering. It returns true if it was successful.
virtual bool fastLowerArguments();
/// \brief This method is called by target-independent code to do target-
/// specific call lowering. It returns true if it was successful.
virtual bool fastLowerCall(CallLoweringInfo &CLI);
/// \brief This method is called by target-independent code to do target-
/// specific intrinsic lowering. It returns true if it was successful.
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type and opcode be emitted.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operand be emitted.
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operands be emitted.
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and floating-point
/// immediate operands be emitted.
virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
unsigned Op0, bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm);
/// \brief This method is a wrapper of fastEmit_ri.
///
/// It first tries to emit an instruction with an immediate operand using
/// fastEmit_ri. If that fails, it materializes the immediate into a register
/// and tries fastEmit_rr instead.
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
uint64_t Imm, MVT ImmType);
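// Illustrative sketch: how a target selector might use the wrapper above;
// Op0Reg, Op0IsKill, and Imm are hypothetical, as is the choice of ISD::ADD.
//
//   unsigned ResultReg =
//       fastEmit_ri_(MVT::i32, ISD::ADD, Op0Reg, Op0IsKill, Imm, MVT::i32);
//   if (!ResultReg)
//     return false; // neither the ri nor the rr form could be emitted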
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and immediate operand be emitted.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and floating-point immediate
/// operand be emitted.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
const ConstantFP *FPImm);
/// \brief Emit a MachineInstr with no operands and a result register in the
/// given register class.
unsigned fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC);
/// \brief Emit a MachineInstr with one register operand and a result register
/// in the given register class.
unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill);
/// \brief Emit a MachineInstr with two register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief Emit a MachineInstr with three register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
/// \brief Emit a MachineInstr with a register operand, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief Emit a MachineInstr with one register operand and two immediate
/// operands.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
/// \brief Emit a MachineInstr with a register operand, a floating-point
/// immediate, and a result register in the given register class.
unsigned fastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm);
/// \brief Emit a MachineInstr with two register operands, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm);
/// \brief Emit a MachineInstr with two register operands, two immediate
/// operands, and a result register in the given register class.
unsigned fastEmitInst_rrii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm1, uint64_t Imm2);
/// \brief Emit a MachineInstr with a single immediate operand, and a result
/// register in the given register class.
unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
const TargetRegisterClass *RC, uint64_t Imm);
/// \brief Emit a MachineInstr with two immediate operands and a result
/// register in the given register class.
unsigned fastEmitInst_ii(unsigned MachineInstrOpcode,
const TargetRegisterClass *RC, uint64_t Imm1,
uint64_t Imm2);
/// \brief Emit a MachineInstr for an extract_subreg from a specified index of
/// a superregister to a specified type.
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
uint32_t Idx);
/// \brief Emit MachineInstrs to compute the value of Op with all but the
/// least significant bit set to zero.
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
/// \brief Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG.
void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
/// \brief Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous
/// determined register.
///
/// NOTE: This is only necessary because we might select a block that uses a
/// value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
unsigned createResultReg(const TargetRegisterClass *RC);
/// \brief Try to constrain Op so that it is usable by argument OpNum of the
/// provided MCInstrDesc. If this fails, create a new virtual register in the
/// correct class and COPY the value there.
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum);
/// \brief Emit a constant in a register using target-specific logic, such as
/// constant pool loads.
virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
/// \brief Emit an alloca address in a register using target-specific logic.
virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
/// \brief Emit the floating-point constant +0.0 in a register using target-
/// specific logic.
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
return 0;
}
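// Illustrative sketch: a target override of the hook above, assuming a
// hypothetical MyTarget::FZERO_S "zero an FP register" instruction and
// FPR32 register class.
//
//   unsigned MyTargetFastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
//     if (CF->getType()->isFloatTy())
//       return fastEmitInst_(MyTarget::FZERO_S, &MyTarget::FPR32RegClass);
//     return 0; // let the generic constant-pool path handle other types
//   }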
/// \brief Check if \c Add is an add that can be safely folded into \c GEP.
///
/// \c Add can be folded into \c GEP if:
/// - \c Add is an add,
/// - \c Add's size matches \c GEP's,
/// - \c Add is in the same basic block as \c GEP, and
/// - \c Add has a constant operand.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
/// \brief Test whether the given value has exactly one use.
bool hasTrivialKill(const Value *V);
/// \brief Create a machine mem operand from the given instruction.
MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
bool lowerCallTo(const CallInst *CI, const char *SymbolName,
unsigned NumArgs);
bool lowerCallTo(CallLoweringInfo &CLI);
bool isCommutativeIntrinsic(IntrinsicInst const *II) {
switch (II->getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
return true;
default:
return false;
}
}
bool lowerCall(const CallInst *I);
/// \brief Select and emit code for a binary operator instruction, which has
/// an opcode which directly corresponds to the given ISD opcode.
bool selectBinaryOp(const User *I, unsigned ISDOpcode);
bool selectFNeg(const User *I);
bool selectGetElementPtr(const User *I);
bool selectStackmap(const CallInst *I);
bool selectPatchpoint(const CallInst *I);
bool selectCall(const User *Call);
bool selectIntrinsicCall(const IntrinsicInst *II);
bool selectBitCast(const User *I);
bool selectCast(const User *I, unsigned Opcode);
bool selectExtractValue(const User *I);
bool selectInsertValue(const User *I);
private:
/// \brief Handle PHI nodes in successor blocks.
///
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion might
/// result in multiple MBB's for one BB. As such, the start of the BB might
/// correspond to a different MBB than the end.
bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
/// \brief Helper for materializeRegForValue to materialize a constant in a
/// target-independent way.
unsigned materializeConstant(const Value *V, MVT VT);
/// \brief Helper for getRegForValue. This function is called when the value
/// isn't already available in a register and must be materialized with new
/// instructions.
unsigned materializeRegForValue(const Value *V, MVT VT);
/// \brief Clears LocalValueMap and moves the area for the new local variables
/// to the beginning of the block. It helps to avoid spilling cached variables
/// across heavy instructions like calls.
void flushLocalValueMap();
/// \brief Insertion point before trying to select the current instruction.
MachineBasicBlock::iterator SavedInsertPt;
/// \brief Add a stackmap or patchpoint intrinsic call's live variable
/// operands to a stackmap or patchpoint machine instruction.
bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx);
bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
const Value *Callee, bool ForceRetVoidTy,
CallLoweringInfo &CLI);
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/CodeGen/MachineRegisterInfo.h | //===-- llvm/CodeGen/MachineRegisterInfo.h ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <vector>
namespace llvm {
class PSetIterator;
/// MachineRegisterInfo - Keep track of information for virtual and physical
/// registers, including vreg register classes, use/def chains for registers,
/// etc.
class MachineRegisterInfo {
public:
class Delegate {
virtual void anchor();
public:
virtual void MRI_NoteNewVirtualRegister(unsigned Reg) = 0;
virtual ~Delegate() {}
};
private:
const MachineFunction *MF;
Delegate *TheDelegate;
/// IsSSA - True when the machine function is in SSA form and virtual
/// registers have a single def.
bool IsSSA;
/// TracksLiveness - True while register liveness is being tracked accurately.
/// Basic block live-in lists, kill flags, and implicit defs may not be
/// accurate after this flag is cleared.
bool TracksLiveness;
/// True if subregister liveness is tracked.
bool TracksSubRegLiveness;
/// VRegInfo - Information we keep for each virtual register.
///
/// Each element in this list contains the register class of the vreg and the
/// start of the use/def list for the register.
IndexedMap<std::pair<const TargetRegisterClass*, MachineOperand*>,
VirtReg2IndexFunctor> VRegInfo;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
/// pair making up the allocation hint. Hint type is target specific except
/// for the value 0 which means the second value of the pair is the preferred
/// register for allocation. For example, if the hint is <0, 1024>, it means
/// the allocator should prefer the physical register allocated to the virtual
/// register of the hint.
IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints;
/// PhysRegUseDefLists - This is an array of the head of the use/def list for
/// physical registers.
std::vector<MachineOperand *> PhysRegUseDefLists;
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
/// Get the next element in the use-def chain.
static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
assert(MO && MO->isReg() && "This is not a register operand!");
return MO->Contents.Reg.Next;
}
/// UsedRegUnits - This is a bit vector that is computed and set by the
/// register allocator, and must be kept up to date by passes that run after
/// register allocation (though most don't modify this). This is used
/// so that the code generator knows which callee save registers to save and
/// for other target specific uses.
/// This vector has bits set for register units that are modified in the
/// current function. It doesn't include registers clobbered by function
/// calls with register mask operands.
BitVector UsedRegUnits;
/// UsedPhysRegMask - Additional used physregs including aliases.
/// This bit vector represents all the registers clobbered by function calls.
/// It can model things that UsedRegUnits can't, such as function calls that
/// clobber ymm7 but preserve the low half in xmm7.
BitVector UsedPhysRegMask;
/// ReservedRegs - This is a bit vector of reserved registers. The target
/// may change its mind about which registers should be reserved. This
/// vector is the frozen set of reserved registers when register allocation
/// started.
BitVector ReservedRegs;
/// Keep track of the physical registers that are live in to the function.
/// Live in values are typically arguments in registers. LiveIn values are
/// allowed to have virtual registers associated with them, stored in the
/// second element.
std::vector<std::pair<unsigned, unsigned> > LiveIns;
MachineRegisterInfo(const MachineRegisterInfo&) = delete;
void operator=(const MachineRegisterInfo&) = delete;
public:
explicit MachineRegisterInfo(const MachineFunction *MF);
const TargetRegisterInfo *getTargetRegisterInfo() const {
return MF->getSubtarget().getRegisterInfo();
}
void resetDelegate(Delegate *delegate) {
// Ensure another delegate does not take over unless the current
// delegate first unattaches itself. If we ever need to multicast
// notifications, we will need to change to using a list.
assert(TheDelegate == delegate &&
"Only the current delegate can perform reset!");
TheDelegate = nullptr;
}
void setDelegate(Delegate *delegate) {
assert(delegate && !TheDelegate &&
"Attempted to set delegate to null, or to change it without "
"first resetting it!");
TheDelegate = delegate;
}
//===--------------------------------------------------------------------===//
// Function State
//===--------------------------------------------------------------------===//
// isSSA - Returns true when the machine function is in SSA form. Early
// passes require the machine function to be in SSA form where every virtual
// register has a single defining instruction.
//
// The TwoAddressInstructionPass and PHIElimination passes take the machine
// function out of SSA form when they introduce multiple defs per virtual
// register.
bool isSSA() const { return IsSSA; }
// leaveSSA - Indicates that the machine function is no longer in SSA form.
void leaveSSA() { IsSSA = false; }
/// tracksLiveness - Returns true when tracking register liveness accurately.
///
/// While this flag is true, register liveness information in basic block
/// live-in lists and machine instruction operands is accurate. This means it
/// can be used to change the code in ways that affect the values in
/// registers, for example by the register scavenger.
///
/// When this flag is false, liveness is no longer reliable.
bool tracksLiveness() const { return TracksLiveness; }
/// invalidateLiveness - Indicates that register liveness is no longer being
/// tracked accurately.
///
/// This should be called by late passes that invalidate the liveness
/// information.
void invalidateLiveness() { TracksLiveness = false; }
/// Returns true if liveness for register class @p RC should be tracked at
/// the subregister level.
bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const {
return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
}
bool shouldTrackSubRegLiveness(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Must pass a VReg");
return shouldTrackSubRegLiveness(*getRegClass(VReg));
}
bool subRegLivenessEnabled() const {
return TracksSubRegLiveness;
}
void enableSubRegLiveness(bool Enable = true) {
TracksSubRegLiveness = Enable;
}
//===--------------------------------------------------------------------===//
// Register Info
//===--------------------------------------------------------------------===//
// Strictly for use by MachineInstr.cpp.
void addRegOperandToUseList(MachineOperand *MO);
// Strictly for use by MachineInstr.cpp.
void removeRegOperandFromUseList(MachineOperand *MO);
// Strictly for use by MachineInstr.cpp.
void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
/// Verify the sanity of the use list for Reg.
void verifyUseList(unsigned Reg) const;
/// Verify the use list of all registers.
void verifyUseLists() const;
/// reg_begin/reg_end - Provide iteration support to walk over all definitions
/// and uses of a register within the MachineFunction that corresponds to this
/// MachineRegisterInfo object.
template<bool Uses, bool Defs, bool SkipDebug,
bool ByOperand, bool ByInstr, bool ByBundle>
class defusechain_iterator;
template<bool Uses, bool Defs, bool SkipDebug,
bool ByOperand, bool ByInstr, bool ByBundle>
class defusechain_instr_iterator;
// Make it a friend so it can access getNextOperandForReg().
template<bool, bool, bool, bool, bool, bool>
friend class defusechain_iterator;
template<bool, bool, bool, bool, bool, bool>
friend class defusechain_instr_iterator;
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
/// register.
typedef defusechain_iterator<true,true,false,true,false,false>
reg_iterator;
reg_iterator reg_begin(unsigned RegNo) const {
return reg_iterator(getRegUseDefListHead(RegNo));
}
static reg_iterator reg_end() { return reg_iterator(nullptr); }
inline iterator_range<reg_iterator> reg_operands(unsigned Reg) const {
return iterator_range<reg_iterator>(reg_begin(Reg), reg_end());
}
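// Illustrative sketch: counting the defs of a register with the range above;
// MRI and Reg are hypothetical. Note that use lists must not be modified
// while iterating them this way.
//
//   unsigned NumDefs = 0;
//   for (MachineOperand &MO : MRI.reg_operands(Reg))
//     if (MO.isDef())
//       ++NumDefs;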
/// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
/// of the specified register, stepping by MachineInstr.
typedef defusechain_instr_iterator<true,true,false,false,true,false>
reg_instr_iterator;
reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
return reg_instr_iterator(getRegUseDefListHead(RegNo));
}
static reg_instr_iterator reg_instr_end() {
return reg_instr_iterator(nullptr);
}
inline iterator_range<reg_instr_iterator>
reg_instructions(unsigned Reg) const {
return iterator_range<reg_instr_iterator>(reg_instr_begin(Reg),
reg_instr_end());
}
/// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
/// of the specified register, stepping by bundle.
typedef defusechain_instr_iterator<true,true,false,false,false,true>
reg_bundle_iterator;
reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
return reg_bundle_iterator(getRegUseDefListHead(RegNo));
}
static reg_bundle_iterator reg_bundle_end() {
return reg_bundle_iterator(nullptr);
}
inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
return iterator_range<reg_bundle_iterator>(reg_bundle_begin(Reg),
reg_bundle_end());
}
/// reg_empty - Return true if there are no instructions using or defining the
/// specified register (it may be live-in).
bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
/// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
/// of the specified register, skipping those marked as Debug.
typedef defusechain_iterator<true,true,true,true,false,false>
reg_nodbg_iterator;
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_nodbg_iterator reg_nodbg_end() {
return reg_nodbg_iterator(nullptr);
}
inline iterator_range<reg_nodbg_iterator>
reg_nodbg_operands(unsigned Reg) const {
return iterator_range<reg_nodbg_iterator>(reg_nodbg_begin(Reg),
reg_nodbg_end());
}
/// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
/// all defs and uses of the specified register, stepping by MachineInstr,
/// skipping those marked as Debug.
typedef defusechain_instr_iterator<true,true,true,false,true,false>
reg_instr_nodbg_iterator;
reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
return reg_instr_nodbg_iterator(nullptr);
}
inline iterator_range<reg_instr_nodbg_iterator>
reg_nodbg_instructions(unsigned Reg) const {
return iterator_range<reg_instr_nodbg_iterator>(reg_instr_nodbg_begin(Reg),
reg_instr_nodbg_end());
}
/// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
/// all defs and uses of the specified register, stepping by bundle,
/// skipping those marked as Debug.
typedef defusechain_instr_iterator<true,true,true,false,false,true>
reg_bundle_nodbg_iterator;
reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
return reg_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<reg_bundle_nodbg_iterator>
reg_nodbg_bundles(unsigned Reg) const {
return iterator_range<reg_bundle_nodbg_iterator>(reg_bundle_nodbg_begin(Reg),
reg_bundle_nodbg_end());
}
/// reg_nodbg_empty - Return true if the only instructions using or defining
/// Reg are Debug instructions.
bool reg_nodbg_empty(unsigned RegNo) const {
return reg_nodbg_begin(RegNo) == reg_nodbg_end();
}
/// def_iterator/def_begin/def_end - Walk all defs of the specified register.
typedef defusechain_iterator<false,true,false,true,false,false>
def_iterator;
def_iterator def_begin(unsigned RegNo) const {
return def_iterator(getRegUseDefListHead(RegNo));
}
static def_iterator def_end() { return def_iterator(nullptr); }
inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
return iterator_range<def_iterator>(def_begin(Reg), def_end());
}
/// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
/// specified register, stepping by MachineInstr.
typedef defusechain_instr_iterator<false,true,false,false,true,false>
def_instr_iterator;
def_instr_iterator def_instr_begin(unsigned RegNo) const {
return def_instr_iterator(getRegUseDefListHead(RegNo));
}
static def_instr_iterator def_instr_end() {
return def_instr_iterator(nullptr);
}
inline iterator_range<def_instr_iterator>
def_instructions(unsigned Reg) const {
return iterator_range<def_instr_iterator>(def_instr_begin(Reg),
def_instr_end());
}
/// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
/// specified register, stepping by bundle.
typedef defusechain_instr_iterator<false,true,false,false,false,true>
def_bundle_iterator;
def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
return def_bundle_iterator(getRegUseDefListHead(RegNo));
}
static def_bundle_iterator def_bundle_end() {
return def_bundle_iterator(nullptr);
}
inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
return iterator_range<def_bundle_iterator>(def_bundle_begin(Reg),
def_bundle_end());
}
/// def_empty - Return true if there are no instructions defining the
/// specified register (it may be live-in).
bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
/// hasOneDef - Return true if there is exactly one instruction defining the
/// specified register.
bool hasOneDef(unsigned RegNo) const {
def_iterator DI = def_begin(RegNo);
if (DI == def_end())
return false;
return ++DI == def_end();
}
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
typedef defusechain_iterator<true,false,false,true,false,false>
use_iterator;
use_iterator use_begin(unsigned RegNo) const {
return use_iterator(getRegUseDefListHead(RegNo));
}
static use_iterator use_end() { return use_iterator(nullptr); }
inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
return iterator_range<use_iterator>(use_begin(Reg), use_end());
}
/// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
/// specified register, stepping by MachineInstr.
typedef defusechain_instr_iterator<true,false,false,false,true,false>
use_instr_iterator;
use_instr_iterator use_instr_begin(unsigned RegNo) const {
return use_instr_iterator(getRegUseDefListHead(RegNo));
}
static use_instr_iterator use_instr_end() {
return use_instr_iterator(nullptr);
}
inline iterator_range<use_instr_iterator>
use_instructions(unsigned Reg) const {
return iterator_range<use_instr_iterator>(use_instr_begin(Reg),
use_instr_end());
}
/// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
/// specified register, stepping by bundle.
typedef defusechain_instr_iterator<true,false,false,false,false,true>
use_bundle_iterator;
use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
return use_bundle_iterator(getRegUseDefListHead(RegNo));
}
static use_bundle_iterator use_bundle_end() {
return use_bundle_iterator(nullptr);
}
inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
return iterator_range<use_bundle_iterator>(use_bundle_begin(Reg),
use_bundle_end());
}
/// use_empty - Return true if there are no instructions using the specified
/// register.
bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
/// hasOneUse - Return true if there is exactly one instruction using the
/// specified register.
bool hasOneUse(unsigned RegNo) const {
use_iterator UI = use_begin(RegNo);
if (UI == use_end())
return false;
return ++UI == use_end();
}
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
/// specified register, skipping those marked as Debug.
typedef defusechain_iterator<true,false,true,true,false,false>
use_nodbg_iterator;
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_nodbg_iterator use_nodbg_end() {
return use_nodbg_iterator(nullptr);
}
inline iterator_range<use_nodbg_iterator>
use_nodbg_operands(unsigned Reg) const {
return iterator_range<use_nodbg_iterator>(use_nodbg_begin(Reg),
use_nodbg_end());
}
/// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
/// all uses of the specified register, stepping by MachineInstr, skipping
/// those marked as Debug.
typedef defusechain_instr_iterator<true,false,true,false,true,false>
use_instr_nodbg_iterator;
use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_instr_nodbg_iterator use_instr_nodbg_end() {
return use_instr_nodbg_iterator(nullptr);
}
inline iterator_range<use_instr_nodbg_iterator>
use_nodbg_instructions(unsigned Reg) const {
return iterator_range<use_instr_nodbg_iterator>(use_instr_nodbg_begin(Reg),
use_instr_nodbg_end());
}
/// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
/// all uses of the specified register, stepping by bundle, skipping
/// those marked as Debug.
typedef defusechain_instr_iterator<true,false,true,false,false,true>
use_bundle_nodbg_iterator;
use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
return use_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<use_bundle_nodbg_iterator>
use_nodbg_bundles(unsigned Reg) const {
return iterator_range<use_bundle_nodbg_iterator>(use_bundle_nodbg_begin(Reg),
use_bundle_nodbg_end());
}
/// use_nodbg_empty - Return true if there are no non-Debug instructions
/// using the specified register.
bool use_nodbg_empty(unsigned RegNo) const {
return use_nodbg_begin(RegNo) == use_nodbg_end();
}
/// hasOneNonDBGUse - Return true if there is exactly one non-Debug
/// instruction using the specified register.
bool hasOneNonDBGUse(unsigned RegNo) const;
/// replaceRegWith - Replace all instances of FromReg with ToReg in the
/// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
/// except that it also changes any definitions of the register as well.
///
/// Note that it is usually necessary to first constrain ToReg's register
/// class to match the FromReg constraints using:
///
/// constrainRegClass(ToReg, getRegClass(FromReg))
///
/// That function will return NULL if the virtual registers have incompatible
/// constraints.
///
/// Note that if ToReg is a physical register, the function will apply any
/// sub-register indices found on the rewritten operands to ToReg in order to
/// obtain the final/proper physical register.
void replaceRegWith(unsigned FromReg, unsigned ToReg);
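// Illustrative sketch: the constrain-then-replace pattern described above;
// FromReg and ToReg are hypothetical virtual registers.
//
//   if (MRI.constrainRegClass(ToReg, MRI.getRegClass(FromReg)))
//     MRI.replaceRegWith(FromReg, ToReg);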
/// getVRegDef - Return the machine instr that defines the specified virtual
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *getVRegDef(unsigned Reg) const;
/// getUniqueVRegDef - Return the unique machine instr that defines the
/// specified virtual register or null if none is found. If there are
/// multiple definitions or no definition, return null.
MachineInstr *getUniqueVRegDef(unsigned Reg) const;
/// clearKillFlags - Iterate over all the uses of the given register and
/// clear the kill flag from the MachineOperand. This function is used by
/// optimization passes which extend register lifetimes and need only
/// preserve conservative kill flag information.
void clearKillFlags(unsigned Reg) const;
#ifndef NDEBUG
void dumpUses(unsigned RegNo) const;
#endif
/// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant
/// throughout the function. It is safe to move instructions that read such
/// a physreg.
bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const;
/// Get an iterator over the pressure sets affected by the given physical or
/// virtual register. If RegUnit is physical, it must be a register unit (from
/// MCRegUnitIterator).
PSetIterator getPressureSets(unsigned RegUnit) const;
//===--------------------------------------------------------------------===//
// Virtual Register Info
//===--------------------------------------------------------------------===//
/// getRegClass - Return the register class of the specified virtual register.
///
const TargetRegisterClass *getRegClass(unsigned Reg) const {
return VRegInfo[Reg].first;
}
/// setRegClass - Set the register class of the specified virtual register.
///
void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
/// constrainRegClass - Constrain the register class of the specified virtual
/// register to be a common subclass of RC and the current register class,
/// but only if the new class has at least MinNumRegs registers. Return the
/// new register class, or NULL if no such class exists.
/// This should only be used when the constraint is known to be trivial, like
/// GR32 -> GR32_NOSP. Beware of increasing register pressure.
///
const TargetRegisterClass *constrainRegClass(unsigned Reg,
const TargetRegisterClass *RC,
unsigned MinNumRegs = 0);
/// recomputeRegClass - Try to find a legal super-class of Reg's register
/// class that still satisfies the constraints from the instructions using
/// Reg. Returns true if Reg was upgraded.
///
/// This method can be used after constraints have been removed from a
/// virtual register, for example after removing instructions or splitting
/// the live range.
///
bool recomputeRegClass(unsigned Reg);
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
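// Illustrative sketch: creating a virtual register and copying a value into
// it; the register class, insertion point, and SrcReg are hypothetical.
//
//   unsigned NewVReg = MRI.createVirtualRegister(&MyTarget::GR32RegClass);
//   BuildMI(*MBB, InsertPt, DL, TII.get(TargetOpcode::COPY), NewVReg)
//       .addReg(SrcReg);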
/// getNumVirtRegs - Return the number of virtual registers created.
///
unsigned getNumVirtRegs() const { return VRegInfo.size(); }
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
void clearVirtRegs();
/// setRegAllocationHint - Specify a register allocation hint for the
/// specified virtual register.
void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
RegAllocHints[VReg].first = Type;
RegAllocHints[VReg].second = PrefReg;
}
/// getRegAllocationHint - Return the register allocation hint for the
/// specified virtual register.
std::pair<unsigned, unsigned>
getRegAllocationHint(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
return RegAllocHints[VReg];
}
/// getSimpleHint - Return the preferred register allocation hint, or 0 if a
/// standard simple hint (Type == 0) is not set.
unsigned getSimpleHint(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
std::pair<unsigned, unsigned> Hint = getRegAllocationHint(VReg);
return Hint.first ? 0 : Hint.second;
}
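// Illustrative sketch: recording and querying a simple (Type == 0) hint;
// VReg and PhysReg are hypothetical.
//
//   MRI.setRegAllocationHint(VReg, /*Type=*/0, PhysReg);
//   unsigned Hint = MRI.getSimpleHint(VReg); // yields PhysReg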
/// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
/// specified register as undefined which causes the DBG_VALUE to be
/// deleted during LiveDebugVariables analysis.
void markUsesInDebugValueAsUndef(unsigned Reg) const;
/// Return true if the specified register is modified in this function.
/// This checks that no defining machine operands exist for the register or
/// any of its aliases. Definitions found on functions marked noreturn are
/// ignored.
bool isPhysRegModified(unsigned PhysReg) const;
//===--------------------------------------------------------------------===//
// Physical Register Use Info
//===--------------------------------------------------------------------===//
/// isPhysRegUsed - Return true if the specified register is used in this
/// function. Also check for clobbered aliases and registers clobbered by
/// function calls with register mask operands.
///
/// This only works after register allocation.
bool isPhysRegUsed(unsigned Reg) const {
if (UsedPhysRegMask.test(Reg))
return true;
for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo());
Units.isValid(); ++Units)
if (UsedRegUnits.test(*Units))
return true;
return false;
}
/// Mark the specified register unit as used in this function.
/// This should only be called during and after register allocation.
void setRegUnitUsed(unsigned RegUnit) {
UsedRegUnits.set(RegUnit);
}
/// setPhysRegUsed - Mark the specified register used in this function.
/// This should only be called during and after register allocation.
void setPhysRegUsed(unsigned Reg) {
for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo());
Units.isValid(); ++Units)
UsedRegUnits.set(*Units);
}
/// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
/// This corresponds to the bit mask attached to register mask operands.
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
UsedPhysRegMask.setBitsNotInMask(RegMask);
}
/// setPhysRegUnused - Mark the specified register unused in this function.
/// This should only be called during and after register allocation.
void setPhysRegUnused(unsigned Reg) {
UsedPhysRegMask.reset(Reg);
for (MCRegUnitIterator Units(Reg, getTargetRegisterInfo());
Units.isValid(); ++Units)
UsedRegUnits.reset(*Units);
}
//===--------------------------------------------------------------------===//
// Reserved Register Info
//===--------------------------------------------------------------------===//
//
// The set of reserved registers must be invariant during register
// allocation. For example, the target cannot suddenly decide it needs a
// frame pointer when the register allocator has already used the frame
// pointer register for something else.
//
// These methods can be used by target hooks like hasFP() to avoid changing
// the reserved register set during register allocation.
/// freezeReservedRegs - Called by the register allocator to freeze the set
/// of reserved registers before allocation begins.
void freezeReservedRegs(const MachineFunction&);
/// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
/// to ensure the set of reserved registers stays constant.
bool reservedRegsFrozen() const {
return !ReservedRegs.empty();
}
/// canReserveReg - Returns true if PhysReg can be used as a reserved
/// register. Any register can be reserved before freezeReservedRegs() is
/// called.
bool canReserveReg(unsigned PhysReg) const {
return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
}
/// getReservedRegs - Returns a reference to the frozen set of reserved
/// registers. This method should always be preferred to calling
/// TRI::getReservedRegs() when possible.
const BitVector &getReservedRegs() const {
assert(reservedRegsFrozen() &&
"Reserved registers haven't been frozen yet. "
"Use TRI::getReservedRegs().");
return ReservedRegs;
}
/// isReserved - Returns true when PhysReg is a reserved register.
///
/// Reserved registers may belong to an allocatable register class, but the
/// target has explicitly requested that they are not used.
///
bool isReserved(unsigned PhysReg) const {
return getReservedRegs().test(PhysReg);
}
/// isAllocatable - Returns true when PhysReg belongs to an allocatable
/// register class and it hasn't been reserved.
///
/// Allocatable registers may show up in the allocation order of some virtual
/// register, so a register allocator needs to track its liveness and
/// availability.
bool isAllocatable(unsigned PhysReg) const {
return getTargetRegisterInfo()->isInAllocatableClass(PhysReg) &&
!isReserved(PhysReg);
}
//===--------------------------------------------------------------------===//
// LiveIn Management
//===--------------------------------------------------------------------===//
/// addLiveIn - Add the specified register as a live-in. Note that it
/// is an error to add the same register to the same set more than once.
void addLiveIn(unsigned Reg, unsigned vreg = 0) {
LiveIns.push_back(std::make_pair(Reg, vreg));
}
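// Illustrative sketch: the usual pattern for recording an incoming argument
// register during argument lowering; RC and PhysReg are hypothetical.
//
//   unsigned VReg = MRI.createVirtualRegister(RC);
//   MRI.addLiveIn(PhysReg, VReg);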
// Iteration support for the live-ins set. It's kept in sorted order
// by register number.
typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
livein_iterator;
livein_iterator livein_begin() const { return LiveIns.begin(); }
livein_iterator livein_end() const { return LiveIns.end(); }
bool livein_empty() const { return LiveIns.empty(); }
bool isLiveIn(unsigned Reg) const;
/// getLiveInPhysReg - If VReg is a live-in virtual register, return the
/// corresponding live-in physical register.
unsigned getLiveInPhysReg(unsigned VReg) const;
/// getLiveInVirtReg - If PReg is a live-in physical register, return the
/// corresponding live-in virtual register.
unsigned getLiveInVirtReg(unsigned PReg) const;
/// EmitLiveInCopies - Emit copies to initialize livein virtual registers
/// into the given entry block.
void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII);
/// Returns a mask covering all bits that can appear in lane masks of
/// subregisters of the virtual register @p Reg.
unsigned getMaxLaneMaskForVReg(unsigned Reg) const;
/// defusechain_iterator - This class provides iterator support for machine
/// operands in the function that use or define a specific register. If
/// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
/// returns defs. If neither are true then you are silly and it always
/// returns end(). If SkipDebug is true it skips uses marked Debug
/// when incrementing.
template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
bool ByOperand, bool ByInstr, bool ByBundle>
class defusechain_iterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = MachineInstr;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
private:
MachineOperand *Op;
explicit defusechain_iterator(MachineOperand *op) : Op(op) {
// If the first node isn't one we're interested in, advance to one that
// we are interested in.
if (op) {
if ((!ReturnUses && op->isUse()) ||
(!ReturnDefs && op->isDef()) ||
(SkipDebug && op->isDebug()))
advance();
}
}
friend class MachineRegisterInfo;
void advance() {
assert(Op && "Cannot increment end iterator!");
Op = getNextOperandForReg(Op);
// All defs come before the uses, so stop def_iterator early.
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
} else {
// If this is an operand we don't care about, skip it.
while (Op && ((!ReturnDefs && Op->isDef()) ||
(SkipDebug && Op->isDebug())))
Op = getNextOperandForReg(Op);
}
}
public:
defusechain_iterator() : Op(nullptr) {}
bool operator==(const defusechain_iterator &x) const {
return Op == x.Op;
}
bool operator!=(const defusechain_iterator &x) const {
return !operator==(x);
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
if (ByOperand)
advance();
else if (ByInstr) {
MachineInstr *P = Op->getParent();
do {
advance();
} while (Op && Op->getParent() == P);
} else if (ByBundle) {
MachineInstr *P = getBundleStart(Op->getParent());
do {
advance();
} while (Op && getBundleStart(Op->getParent()) == P);
}
return *this;
}
defusechain_iterator operator++(int) { // Postincrement
defusechain_iterator tmp = *this; ++*this; return tmp;
}
/// getOperandNo - Return the operand # of this MachineOperand in its
/// MachineInstr.
unsigned getOperandNo() const {
assert(Op && "Cannot dereference end iterator!");
return Op - &Op->getParent()->getOperand(0);
}
// Retrieve a reference to the current operand.
MachineOperand &operator*() const {
assert(Op && "Cannot dereference end iterator!");
return *Op;
}
MachineOperand *operator->() const {
assert(Op && "Cannot dereference end iterator!");
return Op;
}
};
/// defusechain_instr_iterator - This class provides iterator support for machine
/// operands in the function that use or define a specific register. If
/// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
/// returns defs. If neither are true then you are silly and it always
/// returns end(). If SkipDebug is true it skips uses marked Debug
/// when incrementing.
template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
bool ByOperand, bool ByInstr, bool ByBundle>
class defusechain_instr_iterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = MachineInstr;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
private:
MachineOperand *Op;
explicit defusechain_instr_iterator(MachineOperand *op) : Op(op) {
// If the first node isn't one we're interested in, advance to one that
// we are interested in.
if (op) {
if ((!ReturnUses && op->isUse()) ||
(!ReturnDefs && op->isDef()) ||
(SkipDebug && op->isDebug()))
advance();
}
}
friend class MachineRegisterInfo;
void advance() {
assert(Op && "Cannot increment end iterator!");
Op = getNextOperandForReg(Op);
// All defs come before the uses, so stop def_iterator early.
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
} else {
// If this is an operand we don't care about, skip it.
while (Op && ((!ReturnDefs && Op->isDef()) ||
(SkipDebug && Op->isDebug())))
Op = getNextOperandForReg(Op);
}
}
public:
defusechain_instr_iterator() : Op(nullptr) {}
bool operator==(const defusechain_instr_iterator &x) const {
return Op == x.Op;
}
bool operator!=(const defusechain_instr_iterator &x) const {
return !operator==(x);
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_instr_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
if (ByOperand)
advance();
else if (ByInstr) {
MachineInstr *P = Op->getParent();
do {
advance();
} while (Op && Op->getParent() == P);
} else if (ByBundle) {
MachineInstr *P = getBundleStart(Op->getParent());
do {
advance();
} while (Op && getBundleStart(Op->getParent()) == P);
}
return *this;
}
defusechain_instr_iterator operator++(int) { // Postincrement
defusechain_instr_iterator tmp = *this; ++*this; return tmp;
}
// Retrieve a reference to the current operand.
MachineInstr &operator*() const {
assert(Op && "Cannot dereference end iterator!");
if (ByBundle) return *(getBundleStart(Op->getParent()));
return *Op->getParent();
}
MachineInstr *operator->() const {
assert(Op && "Cannot dereference end iterator!");
if (ByBundle) return getBundleStart(Op->getParent());
return Op->getParent();
}
};
};
/// Iterate over the pressure sets affected by the given physical or virtual
/// register. If RegUnit is physical, it must be a register unit (from
/// MCRegUnitIterator).
class PSetIterator {
const int *PSet;
unsigned Weight;
public:
PSetIterator(): PSet(nullptr), Weight(0) {}
PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
PSet = TRI->getRegClassPressureSets(RC);
Weight = TRI->getRegClassWeight(RC).RegWeight;
}
else {
PSet = TRI->getRegUnitPressureSets(RegUnit);
Weight = TRI->getRegUnitWeight(RegUnit);
}
if (*PSet == -1)
PSet = nullptr;
}
bool isValid() const { return PSet; }
unsigned getWeight() const { return Weight; }
unsigned operator*() const { return *PSet; }
void operator++() {
assert(isValid() && "Invalid PSetIterator.");
++PSet;
if (*PSet == -1)
PSet = nullptr;
}
};
inline PSetIterator MachineRegisterInfo::
getPressureSets(unsigned RegUnit) const {
return PSetIterator(RegUnit, this);
}
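// Illustrative sketch: accumulating pressure for a virtual register; MRI,
// VReg, and the Pressure array are hypothetical.
//
//   for (PSetIterator PSetI = MRI.getPressureSets(VReg); PSetI.isValid();
//        ++PSetI)
//     Pressure[*PSetI] += PSetI.getWeight();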
} // End llvm namespace
#endif
|