//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
#include "llvm/TableGen/Error.h"
using namespace llvm;
#define DEBUG_TYPE "subtarget-emitter"
#ifndef NDEBUG
static void dumpIdxVec(const IdxVec &V) {
for (unsigned i = 0, e = V.size(); i < e; ++i) {
dbgs() << V[i] << ", ";
}
}
static void dumpIdxVec(const SmallVectorImpl<unsigned> &V) {
for (unsigned i = 0, e = V.size(); i < e; ++i) {
dbgs() << V[i] << ", ";
}
}
#endif
namespace {
// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
struct InstrsOp : public SetTheory::Operator {
void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
ArrayRef<SMLoc> Loc) override {
ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
}
};
// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
//
// TODO: Since this is a prefix match, perform a binary search over the
// instruction names using lower_bound. Note that the predefined instrs must be
// scanned linearly first. However, this is only safe if the regex pattern has
// no top-level bars. The DAG already has a list of patterns, so there's no
// reason to use top-level bars, but we need a way to verify they don't exist
// before implementing the optimization.
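//
// In a target .td file this operator typically appears inside an InstRW
// record, e.g. (the write and opcode names here are hypothetical):
//   def : InstRW<[WriteALU], (instregex "^ADD", "^SUB")>;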
struct InstRegexOp : public SetTheory::Operator {
const CodeGenTarget &Target;
InstRegexOp(const CodeGenTarget &t): Target(t) {}
void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
ArrayRef<SMLoc> Loc) override {
SmallVector<Regex, 4> RegexList;
for (DagInit::const_arg_iterator
AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) {
StringInit *SI = dyn_cast<StringInit>(*AI);
if (!SI)
PrintFatalError(Loc, "instregex requires pattern string: "
+ Expr->getAsString());
std::string pat = SI->getValue();
// Implement a python-style prefix match.
if (pat[0] != '^') {
pat.insert(0, "^(");
pat.insert(pat.end(), ')');
}
RegexList.push_back(Regex(pat));
}
for (const CodeGenInstruction *Inst : Target.instructions()) {
for (auto &R : RegexList) {
if (R.match(Inst->TheDef->getName()))
Elts.insert(Inst->TheDef);
}
}
}
};
} // end anonymous namespace
/// CodeGenSchedModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
const CodeGenTarget &TGT):
Records(RK), Target(TGT) {
Sets.addFieldExpander("InstRW", "Instrs");
// Allow Set evaluation to recognize the dags used in InstRW records:
// (instrs Op1, Op2, ...)
Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));
// Instantiate a CodeGenProcModel for each SchedMachineModel with the values
// that are explicitly referenced in tablegen records. Resources associated
// with each processor will be derived later. Populate ProcModelMap with the
// CodeGenProcModel instances.
collectProcModels();
// Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
// defined, and populate SchedReads and SchedWrites vectors. Implicit
// SchedReadWrites that represent sequences derived from expanded variants will
// be inferred later.
collectSchedRW();
// Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
// required by an instruction definition, and populate SchedClassIdxMap. Set
// NumItineraryClasses to the number of explicit itinerary classes referenced
// by instructions. Set NumInstrSchedClasses to the number of itinerary
// classes plus any classes implied by instructions that derive from class
// Sched and provide SchedRW list. This does not infer any new classes from
// SchedVariant.
collectSchedClasses();
// Find instruction itineraries for each processor. Sort and populate
// CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
// all itinerary classes to be discovered.
collectProcItins();
// Find ItinRW records for each processor and itinerary class.
// (For per-operand resources mapped to itinerary classes).
collectProcItinRW();
// Infer new SchedClasses from SchedVariant.
inferSchedClasses();
// Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
// ProcResourceDefs.
collectProcResources();
}
/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
// Reserve space because we can. Reallocation would be ok.
ProcModels.reserve(ProcRecords.size()+1);
// Use idx=0 for NoModel/NoItineraries.
Record *NoModelDef = Records.getDef("NoSchedModel");
Record *NoItinsDef = Records.getDef("NoItineraries");
ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
ProcModelMap[NoModelDef] = 0;
// For each processor, find a unique machine model.
for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
addProcModel(ProcRecords[i]);
}
/// Get a unique processor model based on the defined MachineModel and
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
Record *ModelKey = getModelOrItinDef(ProcDef);
if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
return;
std::string Name = ModelKey->getName();
if (ModelKey->isSubClassOf("SchedMachineModel")) {
Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
}
else {
// An itinerary is defined without a machine model. Infer a new model.
if (!ModelKey->getValueAsListOfDefs("IID").empty())
Name = Name + "Model";
ProcModels.emplace_back(ProcModels.size(), Name,
ProcDef->getValueAsDef("SchedModel"), ModelKey);
}
DEBUG(ProcModels.back().dump());
}
// Recursively find all reachable SchedReadWrite records.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
SmallPtrSet<Record*, 16> &RWSet) {
if (!RWSet.insert(RWDef).second)
return;
RWDefs.push_back(RWDef);
// Reads don't currently have sequence records, but they can be added later.
if (RWDef->isSubClassOf("WriteSequence")) {
RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I)
scanSchedRW(*I, RWDefs, RWSet);
}
else if (RWDef->isSubClassOf("SchedVariant")) {
// Visit each variant (guarded by a different predicate).
RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) {
// Visit each RW in the sequence selected by the current variant.
RecVec Selected = (*VI)->getValueAsListOfDefs("Selected");
for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I)
scanSchedRW(*I, RWDefs, RWSet);
}
}
}
// Collect and sort all SchedReadWrites reachable via tablegen records.
// More may be inferred later when inferring new SchedClasses from variants.
void CodeGenSchedModels::collectSchedRW() {
// Reserve idx=0 for invalid writes/reads.
SchedWrites.resize(1);
SchedReads.resize(1);
SmallPtrSet<Record*, 16> RWSet;
// Find all SchedReadWrites referenced by instruction defs.
RecVec SWDefs, SRDefs;
for (const CodeGenInstruction *Inst : Target.instructions()) {
Record *SchedDef = Inst->TheDef;
if (SchedDef->isValueUnset("SchedRW"))
continue;
RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) {
if ((*RWI)->isSubClassOf("SchedWrite"))
scanSchedRW(*RWI, SWDefs, RWSet);
else {
assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
scanSchedRW(*RWI, SRDefs, RWSet);
}
}
}
// Find all ReadWrites referenced by InstRW.
RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) {
// For all OperandReadWrites.
RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites");
for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
RWI != RWE; ++RWI) {
if ((*RWI)->isSubClassOf("SchedWrite"))
scanSchedRW(*RWI, SWDefs, RWSet);
else {
assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
scanSchedRW(*RWI, SRDefs, RWSet);
}
}
}
// Find all ReadWrites referenced by ItinRW.
RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
// For all OperandReadWrites.
RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites");
for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
RWI != RWE; ++RWI) {
if ((*RWI)->isSubClassOf("SchedWrite"))
scanSchedRW(*RWI, SWDefs, RWSet);
else {
assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
scanSchedRW(*RWI, SRDefs, RWSet);
}
}
}
// Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
// for the loop below that initializes Alias vectors.
RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
if (MatchDef->isSubClassOf("SchedWrite")) {
if (!AliasDef->isSubClassOf("SchedWrite"))
PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite");
scanSchedRW(AliasDef, SWDefs, RWSet);
}
else {
assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
if (!AliasDef->isSubClassOf("SchedRead"))
PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead");
scanSchedRW(AliasDef, SRDefs, RWSet);
}
}
// Sort and add the SchedReadWrites directly referenced by instructions or
// itinerary resources. Index reads and writes in separate domains.
std::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) {
assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite");
SchedWrites.emplace_back(SchedWrites.size(), *SWI);
}
std::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) {
assert(!getSchedRWIdx(*SRI, /*IsRead=*/true) && "duplicate SchedRead");
SchedReads.emplace_back(SchedReads.size(), *SRI);
}
// Initialize WriteSequence vectors.
for (std::vector<CodeGenSchedRW>::iterator WI = SchedWrites.begin(),
WE = SchedWrites.end(); WI != WE; ++WI) {
if (!WI->IsSequence)
continue;
findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence,
/*IsRead=*/false);
}
// Initialize Aliases vectors.
for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
getSchedRW(AliasDef).IsAlias = true;
Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
CodeGenSchedRW &RW = getSchedRW(MatchDef);
if (RW.IsAlias)
PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias");
RW.Aliases.push_back(*AI);
}
DEBUG(
for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
dbgs() << WIdx << ": ";
SchedWrites[WIdx].dump();
dbgs() << '\n';
}
for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
dbgs() << RIdx << ": ";
SchedReads[RIdx].dump();
dbgs() << '\n';
}
RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
for (RecIter RI = RWDefs.begin(), RE = RWDefs.end();
RI != RE; ++RI) {
if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) {
const std::string &Name = (*RI)->getName();
if (Name != "NoWrite" && Name != "ReadDefault")
dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n';
}
});
}
/// Compute a SchedWrite name from a sequence of writes.
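/// For example, the write sequence [WriteA, WriteB] yields the name
/// "(WriteA_WriteB)" (names are illustrative).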
std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) {
std::string Name("(");
for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) {
if (I != Seq.begin())
Name += '_';
Name += getSchedRW(*I, IsRead).Name;
}
Name += ')';
return Name;
}
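// Return the index of the CodeGenSchedRW matching Def in the read or write
// domain, searching from position After. Index 0 is reserved for invalid RWs,
// so a return value of 0 means the record was not found.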
unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead,
unsigned After) const {
const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
assert(After < RWVec.size() && "start position out of bounds");
for (std::vector<CodeGenSchedRW>::const_iterator I = RWVec.begin() + After,
E = RWVec.end(); I != E; ++I) {
if (I->TheDef == Def)
return I - RWVec.begin();
}
return 0;
}
bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) {
Record *ReadDef = SchedReads[i].TheDef;
if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
continue;
RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef)
!= ValidWrites.end()) {
return true;
}
}
return false;
}
namespace llvm {
void splitSchedReadWrites(const RecVec &RWDefs,
RecVec &WriteDefs, RecVec &ReadDefs) {
for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
if ((*RWI)->isSubClassOf("SchedWrite"))
WriteDefs.push_back(*RWI);
else {
assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
ReadDefs.push_back(*RWI);
}
}
}
} // namespace llvm
// Split the SchedReadWrites defs and call findRWs for each list.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
IdxVec &Writes, IdxVec &Reads) const {
RecVec WriteDefs;
RecVec ReadDefs;
splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
findRWs(WriteDefs, Writes, false);
findRWs(ReadDefs, Reads, true);
}
// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
bool IsRead) const {
for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) {
unsigned Idx = getSchedRWIdx(*RI, IsRead);
assert(Idx && "failed to collect SchedReadWrite");
RWs.push_back(Idx);
}
}
void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
bool IsRead) const {
const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
if (!SchedRW.IsSequence) {
RWSeq.push_back(RWIdx);
return;
}
int Repeat =
SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
for (int i = 0; i < Repeat; ++i) {
for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end();
I != E; ++I) {
expandRWSequence(*I, RWSeq, IsRead);
}
}
}
// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
void CodeGenSchedModels::expandRWSeqForProc(
unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
const CodeGenProcModel &ProcModel) const {
const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
Record *AliasDef = nullptr;
for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
AI != AE; ++AI) {
const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
if ((*AI)->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
if (&getProcModel(ModelDef) != &ProcModel)
continue;
}
if (AliasDef)
PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
"defined for processor " + ProcModel.ModelName +
" Ensure only one SchedAlias exists per RW.");
AliasDef = AliasRW.TheDef;
}
if (AliasDef) {
expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
RWSeq, IsRead,ProcModel);
return;
}
if (!SchedWrite.IsSequence) {
RWSeq.push_back(RWIdx);
return;
}
int Repeat =
SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
for (int i = 0; i < Repeat; ++i) {
for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end();
I != E; ++I) {
expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel);
}
}
}
// Find the existing SchedWrite that models this sequence of writes.
unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq,
bool IsRead) {
std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
for (std::vector<CodeGenSchedRW>::iterator I = RWVec.begin(), E = RWVec.end();
I != E; ++I) {
if (I->Sequence == Seq)
return I - RWVec.begin();
}
// Index zero reserved for invalid RW.
return 0;
}
/// Add this ReadWrite if it doesn't already exist.
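/// A single-element sequence never gets its own entry; its sole index is
/// returned directly.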
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
bool IsRead) {
assert(!Seq.empty() && "cannot insert empty sequence");
if (Seq.size() == 1)
return Seq.back();
unsigned Idx = findRWForSequence(Seq, IsRead);
if (Idx)
return Idx;
unsigned RWIdx = IsRead ? SchedReads.size() : SchedWrites.size();
CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
if (IsRead)
SchedReads.push_back(SchedRW);
else
SchedWrites.push_back(SchedRW);
return RWIdx;
}
/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
void CodeGenSchedModels::collectSchedClasses() {
// NoItinerary is always the first class at Idx=0
SchedClasses.resize(1);
SchedClasses.back().Index = 0;
SchedClasses.back().Name = "NoInstrModel";
SchedClasses.back().ItinClassDef = Records.getDef("NoItinerary");
SchedClasses.back().ProcIndices.push_back(0);
// Create a SchedClass for each unique combination of itinerary class and
// SchedRW list.
for (const CodeGenInstruction *Inst : Target.instructions()) {
Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
IdxVec Writes, Reads;
if (!Inst->TheDef->isValueUnset("SchedRW"))
findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
// ProcIdx == 0 indicates the class applies to all processors.
IdxVec ProcIndices(1, 0);
unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, ProcIndices);
InstrClassMap[Inst->TheDef] = SCIdx;
}
// Create classes for InstRW defs.
RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
createInstRWClass(*OI);
NumInstrSchedClasses = SchedClasses.size();
bool EnableDump = false;
DEBUG(EnableDump = true);
if (!EnableDump)
return;
for (const CodeGenInstruction *Inst : Target.instructions()) {
std::string InstName = Inst->TheDef->getName();
unsigned SCIdx = InstrClassMap.lookup(Inst->TheDef);
if (!SCIdx) {
dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
continue;
}
CodeGenSchedClass &SC = getSchedClass(SCIdx);
if (SC.ProcIndices[0] != 0)
PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
"must not be subtarget specific.");
IdxVec ProcIndices;
if (SC.ItinClassDef->getName() != "NoItinerary") {
ProcIndices.push_back(0);
dbgs() << "Itinerary for " << InstName << ": "
<< SC.ItinClassDef->getName() << '\n';
}
if (!SC.Writes.empty()) {
ProcIndices.push_back(0);
dbgs() << "SchedRW machine model for " << InstName;
for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE; ++WI)
dbgs() << " " << SchedWrites[*WI].Name;
for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
dbgs() << " " << SchedReads[*RI].Name;
dbgs() << '\n';
}
const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
RWI != RWE; ++RWI) {
const CodeGenProcModel &ProcModel =
getProcModel((*RWI)->getValueAsDef("SchedModel"));
ProcIndices.push_back(ProcModel.Index);
dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName;
IdxVec Writes;
IdxVec Reads;
findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
Writes, Reads);
for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
dbgs() << " " << SchedWrites[*WI].Name;
for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
dbgs() << " " << SchedReads[*RI].Name;
dbgs() << '\n';
}
for (std::vector<CodeGenProcModel>::iterator PI = ProcModels.begin(),
PE = ProcModels.end(); PI != PE; ++PI) {
if (!std::count(ProcIndices.begin(), ProcIndices.end(), PI->Index))
dbgs() << "No machine model for " << Inst->TheDef->getName()
<< " on processor " << PI->ModelName << '\n';
}
}
}
/// Find a SchedClass that has been inferred from a per-operand list of
/// SchedWrites and SchedReads.
unsigned CodeGenSchedModels::findSchedClassIdx(Record *ItinClassDef,
const IdxVec &Writes,
const IdxVec &Reads) const {
for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) {
if (I->ItinClassDef == ItinClassDef
&& I->Writes == Writes && I->Reads == Reads) {
return I - schedClassBegin();
}
}
return 0;
}
// Get the SchedClass index for an instruction.
unsigned CodeGenSchedModels::getSchedClassIdx(
const CodeGenInstruction &Inst) const {
return InstrClassMap.lookup(Inst.TheDef);
}
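// Build a SchedClass name from the itinerary class and the SchedWrite/SchedRead
// names it was formed from, e.g. "IIC_iALU_WriteALU_ReadALU" (illustrative).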
std::string CodeGenSchedModels::createSchedClassName(
Record *ItinClassDef, const IdxVec &OperWrites, const IdxVec &OperReads) {
std::string Name;
if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
Name = ItinClassDef->getName();
for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) {
if (!Name.empty())
Name += '_';
Name += SchedWrites[*WI].Name;
}
for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) {
Name += '_';
Name += SchedReads[*RI].Name;
}
return Name;
}
std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
std::string Name;
for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
if (I != InstDefs.begin())
Name += '_';
Name += (*I)->getName();
}
return Name;
}
/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
const IdxVec &OperWrites,
const IdxVec &OperReads,
const IdxVec &ProcIndices)
{
assert(!ProcIndices.empty() && "expect at least one ProcIdx");
unsigned Idx = findSchedClassIdx(ItinClassDef, OperWrites, OperReads);
if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
IdxVec PI;
std::set_union(SchedClasses[Idx].ProcIndices.begin(),
SchedClasses[Idx].ProcIndices.end(),
ProcIndices.begin(), ProcIndices.end(),
std::back_inserter(PI));
SchedClasses[Idx].ProcIndices.swap(PI);
return Idx;
}
Idx = SchedClasses.size();
SchedClasses.resize(Idx+1);
CodeGenSchedClass &SC = SchedClasses.back();
SC.Index = Idx;
SC.Name = createSchedClassName(ItinClassDef, OperWrites, OperReads);
SC.ItinClassDef = ItinClassDef;
SC.Writes = OperWrites;
SC.Reads = OperReads;
SC.ProcIndices = ProcIndices;
return Idx;
}
// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
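// A typical InstRW definition in a processor's .td file looks like this
// (names are hypothetical):
//   def : InstRW<[WriteLd, ReadBase], (instrs LDRi12)>;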
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
// ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
// intersects with an existing class via a previous InstRWDef. Instrs that do
// not intersect with an existing class refer back to their former class as
// determined from ItinDef or SchedRW.
SmallVector<std::pair<unsigned, SmallVector<Record *, 8> >, 4> ClassInstrs;
// Sort Instrs into sets.
const RecVec *InstDefs = Sets.expand(InstRWDef);
if (InstDefs->empty())
PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) {
InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I);
if (Pos == InstrClassMap.end())
PrintFatalError((*I)->getLoc(), "No sched class for instruction.");
unsigned SCIdx = Pos->second;
unsigned CIdx = 0, CEnd = ClassInstrs.size();
for (; CIdx != CEnd; ++CIdx) {
if (ClassInstrs[CIdx].first == SCIdx)
break;
}
if (CIdx == CEnd) {
ClassInstrs.resize(CEnd + 1);
ClassInstrs[CIdx].first = SCIdx;
}
ClassInstrs[CIdx].second.push_back(*I);
}
// For each set of Instrs, create a new class if necessary, and map or remap
// the Instrs to it.
unsigned CIdx = 0, CEnd = ClassInstrs.size();
for (; CIdx != CEnd; ++CIdx) {
unsigned OldSCIdx = ClassInstrs[CIdx].first;
ArrayRef<Record*> InstDefs = ClassInstrs[CIdx].second;
// If all the instrs in the current class are accounted for, then leave
// them mapped to their old class.
if (OldSCIdx) {
const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
if (!RWDefs.empty()) {
const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
unsigned OrigNumInstrs = 0;
for (RecIter I = OrigInstDefs->begin(), E = OrigInstDefs->end();
I != E; ++I) {
if (InstrClassMap[*I] == OldSCIdx)
++OrigNumInstrs;
}
if (OrigNumInstrs == InstDefs.size()) {
assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
"expected a generic SchedClass");
DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
<< SchedClasses[OldSCIdx].Name << " on "
<< InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
continue;
}
}
}
unsigned SCIdx = SchedClasses.size();
SchedClasses.resize(SCIdx+1);
CodeGenSchedClass &SC = SchedClasses.back();
SC.Index = SCIdx;
SC.Name = createSchedClassName(InstDefs);
DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
<< InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
// Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
SC.Writes = SchedClasses[OldSCIdx].Writes;
SC.Reads = SchedClasses[OldSCIdx].Reads;
SC.ProcIndices.push_back(0);
// Map each Instr to this new class.
// Note that InstDefs may be a smaller list than InstRWDef's "Instrs".
Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
SmallSet<unsigned, 4> RemappedClassIDs;
for (ArrayRef<Record*>::const_iterator
II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
unsigned OldSCIdx = InstrClassMap[*II];
if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx).second) {
for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
(*II)->getName() + " also matches " +
(*RI)->getValue("Instrs")->getValue()->getAsString());
}
assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def");
SC.InstRWs.push_back(*RI);
}
}
InstrClassMap[*II] = SCIdx;
}
SC.InstRWs.push_back(InstRWDef);
}
}
// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
for (CodeGenSchedModels::ProcIter PI = procModelBegin(), PE = procModelEnd();
PI != PE; ++PI) {
if (PI->hasItineraries())
return true;
}
return false;
}
// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
for (CodeGenProcModel &ProcModel : ProcModels) {
if (!ProcModel.hasItineraries())
continue;
RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");
// Populate ItinDefList with Itinerary records.
ProcModel.ItinDefList.resize(NumInstrSchedClasses);
// Insert each itinerary data record in the correct position within
// the processor model's ItinDefList.
for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
Record *ItinData = ItinRecords[i];
Record *ItinDef = ItinData->getValueAsDef("TheClass");
bool FoundClass = false;
for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
SCI != SCE; ++SCI) {
// Multiple SchedClasses may share an itinerary. Update all of them.
if (SCI->ItinClassDef == ItinDef) {
ProcModel.ItinDefList[SCI->Index] = ItinData;
FoundClass = true;
}
}
if (!FoundClass) {
DEBUG(dbgs() << ProcModel.ItinsDef->getName()
<< " missing class for itinerary " << ItinDef->getName() << '\n');
}
}
// Check for missing itinerary entries.
assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
DEBUG(
for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
if (!ProcModel.ItinDefList[i])
dbgs() << ProcModel.ItinsDef->getName()
<< " missing itinerary for class "
<< SchedClasses[i].Name << '\n';
});
}
}
// Gather the read/write types for each itinerary class.
void CodeGenSchedModels::collectProcItinRW() {
RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
if (!(*II)->getValueInit("SchedModel")->isComplete())
PrintFatalError((*II)->getLoc(), "SchedModel is undefined");
Record *ModelDef = (*II)->getValueAsDef("SchedModel");
ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
if (I == ProcModelMap.end()) {
PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel "
+ ModelDef->getName());
}
ProcModels[I->second].ItinRWDefs.push_back(*II);
}
}
/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
// Visit all existing classes and newly created classes.
for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");
if (SchedClasses[Idx].ItinClassDef)
inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
if (!SchedClasses[Idx].InstRWs.empty())
inferFromInstRWs(Idx);
if (!SchedClasses[Idx].Writes.empty()) {
inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
Idx, SchedClasses[Idx].ProcIndices);
}
assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
"too many SchedVariants");
}
}
/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
unsigned FromClassIdx) {
for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
const CodeGenProcModel &PM = ProcModels[PIdx];
// For all ItinRW entries.
bool HasMatch = false;
for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
II != IE; ++II) {
RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
continue;
if (HasMatch)
PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+ ItinClassDef->getName()
+ " in ItinResources for " + PM.ModelName);
HasMatch = true;
IdxVec Writes, Reads;
findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
IdxVec ProcIndices(1, PIdx);
inferFromRW(Writes, Reads, FromClassIdx, ProcIndices);
}
}
}
/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstRWs was mutated!");
Record *Rec = SchedClasses[SCIdx].InstRWs[I];
const RecVec *InstDefs = Sets.expand(Rec);
RecIter II = InstDefs->begin(), IE = InstDefs->end();
for (; II != IE; ++II) {
if (InstrClassMap[*II] == SCIdx)
break;
}
// If this class no longer has any instructions mapped to it, it has become
// irrelevant.
if (II == IE)
continue;
IdxVec Writes, Reads;
findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
IdxVec ProcIndices(1, PIdx);
inferFromRW(Writes, Reads, SCIdx, ProcIndices); // May mutate SchedClasses.
}
}
namespace {
// Helper for substituteVariantOperand.
struct TransVariant {
Record *VarOrSeqDef; // Variant or sequence.
unsigned RWIdx; // Index of this variant or sequence's matched type.
unsigned ProcIdx; // Processor model index or zero for any.
unsigned TransVecIdx; // Index into PredTransitions::TransVec.
TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};
// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
bool IsRead;
unsigned RWIdx;
Record *Predicate;
PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
};
// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
// A predicate term is a conjunction of PredChecks.
SmallVector<PredCheck, 4> PredTerm;
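// One entry per write/read operand; each inner vector holds the (partially)
// expanded sequence of SchedWrite/SchedRead indices for that operand.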
SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
SmallVector<unsigned, 4> ProcIndices;
};
// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
CodeGenSchedModels &SchedModels;
public:
std::vector<PredTransition> TransVec;
PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
bool IsRead, unsigned StartIdx);
void substituteVariants(const PredTransition &Trans);
#ifndef NDEBUG
void dump() const;
#endif
private:
bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
void getIntersectingVariants(
const CodeGenSchedRW &SchedRW, unsigned TransIdx,
std::vector<TransVariant> &IntersectingVariants);
void pushVariant(const TransVariant &VInfo, bool IsRead);
};
} // end anonymous namespace
// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
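//
// For example (illustrative names), if WriteX has variants guarded by PredA
// and PredB, a term that already contains the check (WriteX, PredA) is
// mutually exclusive with PredB.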
bool PredTransitions::mutuallyExclusive(Record *PredDef,
ArrayRef<PredCheck> Term) {
for (ArrayRef<PredCheck>::iterator I = Term.begin(), E = Term.end();
I != E; ++I) {
if (I->Predicate == PredDef)
return false;
const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead);
assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) {
if ((*VI)->getValueAsDef("Predicate") == PredDef)
return true;
}
}
return false;
}
static bool hasAliasedVariants(const CodeGenSchedRW &RW,
CodeGenSchedModels &SchedModels) {
if (RW.HasVariants)
return true;
for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) {
const CodeGenSchedRW &AliasRW =
SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW"));
if (AliasRW.HasVariants)
return true;
if (AliasRW.IsSequence) {
IdxVec ExpandedRWs;
SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
SI != SE; ++SI) {
if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead),
SchedModels)) {
return true;
}
}
}
}
return false;
}
static bool hasVariant(ArrayRef<PredTransition> Transitions,
CodeGenSchedModels &SchedModels) {
for (ArrayRef<PredTransition>::iterator
PTI = Transitions.begin(), PTE = Transitions.end();
PTI != PTE; ++PTI) {
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end();
WSI != WSE; ++WSI) {
for (SmallVectorImpl<unsigned>::const_iterator
WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels))
return true;
}
}
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end();
RSI != RSE; ++RSI) {
for (SmallVectorImpl<unsigned>::const_iterator
RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) {
if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels))
return true;
}
}
}
return false;
}
// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
const CodeGenSchedRW &SchedRW, unsigned TransIdx,
std::vector<TransVariant> &IntersectingVariants) {
bool GenericRW = false;
std::vector<TransVariant> Variants;
if (SchedRW.HasVariants) {
unsigned VarProcIdx = 0;
if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
}
// Push each variant. Assign TransVecIdx later.
const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0));
if (VarProcIdx == 0)
GenericRW = true;
}
for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
AI != AE; ++AI) {
// If either the SchedAlias itself or the SchedReadWrite that it aliases
// to is defined within a processor model, constrain all variants to
// that processor.
unsigned AliasProcIdx = 0;
if ((*AI)->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
}
const CodeGenSchedRW &AliasRW =
SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
if (AliasRW.HasVariants) {
const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0));
}
if (AliasRW.IsSequence) {
Variants.push_back(
TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0));
}
if (AliasProcIdx == 0)
GenericRW = true;
}
for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) {
TransVariant &Variant = Variants[VIdx];
// Don't expand variants if the processor models don't intersect.
// A zero processor index means any processor.
SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
Variant.ProcIdx);
if (!Cnt)
continue;
if (Cnt > 1) {
const CodeGenProcModel &PM =
*(SchedModels.procModelBegin() + Variant.ProcIdx);
PrintFatalError(Variant.VarOrSeqDef->getLoc(),
"Multiple variants defined for processor " +
PM.ModelName +
" Ensure only one SchedAlias exists per RW.");
}
}
if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
continue;
}
if (IntersectingVariants.empty()) {
// The first variant builds on the existing transition.
Variant.TransVecIdx = TransIdx;
IntersectingVariants.push_back(Variant);
}
else {
// Push another copy of the current transition for more variants.
Variant.TransVecIdx = TransVec.size();
IntersectingVariants.push_back(Variant);
TransVec.push_back(TransVec[TransIdx]);
}
}
if (GenericRW && IntersectingVariants.empty()) {
PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
"a matching predicate on any processor");
}
}
// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::
pushVariant(const TransVariant &VInfo, bool IsRead) {
PredTransition &Trans = TransVec[VInfo.TransVecIdx];
// If this operand transition is reached through a processor-specific alias,
// then the whole transition is specific to this processor.
if (VInfo.ProcIdx != 0)
Trans.ProcIndices.assign(1, VInfo.ProcIdx);
IdxVec SelectedRWs;
if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx,PredDef));
RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
}
else {
assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
"variant must be a SchedVariant or aliased WriteSequence");
SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
}
const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
SmallVectorImpl<SmallVector<unsigned,4> > &RWSequences = IsRead
? Trans.ReadSequences : Trans.WriteSequences;
if (SchedRW.IsVariadic) {
unsigned OperIdx = RWSequences.size()-1;
// Make N-1 copies of this transition's last sequence.
for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) {
// The vector could reallocate during push_back, invalidating the
// RWSequences[OperIdx] reference, so reserve space first.
RWSequences.reserve(RWSequences.size() + 1);
RWSequences.push_back(RWSequences[OperIdx]);
}
// Push each of the N elements of the SelectedRWs onto a copy of the last
// sequence (split the current operand into N operands).
// Note that write sequences should be expanded within this loop--the entire
// sequence belongs to a single operand.
for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
RWI != RWE; ++RWI, ++OperIdx) {
IdxVec ExpandedRWs;
if (IsRead)
ExpandedRWs.push_back(*RWI);
else
SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
ExpandedRWs.begin(), ExpandedRWs.end());
}
assert(OperIdx == RWSequences.size() && "missed a sequence");
}
else {
// Push this transition's expanded sequence onto this transition's last
// sequence (add to the current operand's sequence).
SmallVectorImpl<unsigned> &Seq = RWSequences.back();
IdxVec ExpandedRWs;
for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
RWI != RWE; ++RWI) {
if (IsRead)
ExpandedRWs.push_back(*RWI);
else
SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
}
Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
}
}
// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where partial results
// start. RWSeq must be applied to all transitions between StartIdx and the end
// of TransVec.
void PredTransitions::substituteVariantOperand(
const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
// Visit each original RW within the current sequence.
for (SmallVectorImpl<unsigned>::const_iterator
RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
// Push this RW on all partial PredTransitions or distribute variants.
// New PredTransitions may be pushed within this loop which should not be
// revisited (TransEnd must be loop invariant).
for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
TransIdx != TransEnd; ++TransIdx) {
// In the common case, push RW onto the current operand's sequence.
if (!hasAliasedVariants(SchedRW, SchedModels)) {
if (IsRead)
TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
else
TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
continue;
}
// Distribute this partial PredTransition across intersecting variants.
// This will push copies of TransVec[TransIdx] onto the back of TransVec.
std::vector<TransVariant> IntersectingVariants;
getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
// Now expand each variant on top of its copy of the transition.
for (std::vector<TransVariant>::const_iterator
IVI = IntersectingVariants.begin(),
IVE = IntersectingVariants.end();
IVI != IVE; ++IVI) {
pushVariant(*IVI, IsRead);
}
}
}
}
// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number of variants.
//
// This is one step in a breadth-first search of nested variants.
void PredTransitions::substituteVariants(const PredTransition &Trans) {
// Build up a set of partial results starting at the back of
// PredTransitions. Remember the first new transition.
unsigned StartIdx = TransVec.size();
TransVec.resize(TransVec.size() + 1);
TransVec.back().PredTerm = Trans.PredTerm;
TransVec.back().ProcIndices = Trans.ProcIndices;
// Visit each original write sequence.
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
WSI != WSE; ++WSI) {
// Push a new (empty) write sequence onto all partial Transitions.
for (std::vector<PredTransition>::iterator I =
TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
I->WriteSequences.resize(I->WriteSequences.size() + 1);
}
substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
}
// Visit each original read sequence.
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
RSI != RSE; ++RSI) {
// Push a new (empty) read sequence onto all partial Transitions.
for (std::vector<PredTransition>::iterator I =
TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
I->ReadSequences.resize(I->ReadSequences.size() + 1);
}
substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
}
}
// Create a new SchedClass for each variant found by inferFromRW.
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
unsigned FromClassIdx,
CodeGenSchedModels &SchedModels) {
// For each PredTransition, create a new CodeGenSchedTransition, which usually
// requires creating a new SchedClass.
for (ArrayRef<PredTransition>::iterator
I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
IdxVec OperWritesVariant;
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end();
WSI != WSE; ++WSI) {
// Create a new write representing the expanded sequence.
OperWritesVariant.push_back(
SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false));
}
IdxVec OperReadsVariant;
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end();
RSI != RSE; ++RSI) {
// Create a new read representing the expanded sequence.
OperReadsVariant.push_back(
SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true));
}
IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end());
CodeGenSchedTransition SCTrans;
SCTrans.ToClassIdx =
SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
OperReadsVariant, ProcIndices);
SCTrans.ProcIndices = ProcIndices;
// The final PredTerm is the unique set of predicates guarding the transition.
RecVec Preds;
for (SmallVectorImpl<PredCheck>::const_iterator
PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) {
Preds.push_back(PI->Predicate);
}
RecIter PredsEnd = std::unique(Preds.begin(), Preds.end());
Preds.resize(PredsEnd - Preds.begin());
SCTrans.PredTerm = Preds;
SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans);
}
}
// Create new SchedClasses for the given ReadWrite list. If any of the
// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
// of the ReadWrite list, following Aliases if necessary.
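//
// For example (illustrative names), if OperWrites contains a SchedWriteVariant
// with two SchedVars, this expands into two transitions and therefore two
// inferred SchedClasses, each guarded by the corresponding predicate.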
void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites,
const IdxVec &OperReads,
unsigned FromClassIdx,
const IdxVec &ProcIndices) {
DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices); dbgs() << ") ");
// Create a seed transition with an empty PredTerm and the expanded sequences
// of SchedWrites for the current SchedClass.
std::vector<PredTransition> LastTransitions;
LastTransitions.resize(1);
LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
ProcIndices.end());
for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) {
IdxVec WriteSeq;
expandRWSequence(*I, WriteSeq, /*IsRead=*/false);
unsigned Idx = LastTransitions[0].WriteSequences.size();
LastTransitions[0].WriteSequences.resize(Idx + 1);
SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences[Idx];
for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI)
Seq.push_back(*WI);
DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
}
DEBUG(dbgs() << " Reads: ");
for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) {
IdxVec ReadSeq;
expandRWSequence(*I, ReadSeq, /*IsRead=*/true);
unsigned Idx = LastTransitions[0].ReadSequences.size();
LastTransitions[0].ReadSequences.resize(Idx + 1);
SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences[Idx];
for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI)
Seq.push_back(*RI);
DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
}
DEBUG(dbgs() << '\n');
// Collect all PredTransitions for individual operands.
// Iterate until no variant writes remain.
while (hasVariant(LastTransitions, *this)) {
PredTransitions Transitions(*this);
for (std::vector<PredTransition>::const_iterator
I = LastTransitions.begin(), E = LastTransitions.end();
I != E; ++I) {
Transitions.substituteVariants(*I);
}
DEBUG(Transitions.dump());
LastTransitions.swap(Transitions.TransVec);
}
// If the first transition has no variants, nothing to do.
if (LastTransitions[0].PredTerm.empty())
return;
// WARNING: We are about to mutate the SchedClasses vector. Do not refer to
// OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
inferFromTransitions(LastTransitions, FromClassIdx, *this);
}
// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
continue;
RecVec SuperUnits =
PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
RecIter RI = SubUnits.begin(), RE = SubUnits.end();
for ( ; RI != RE; ++RI) {
if (std::find(SuperUnits.begin(), SuperUnits.end(), *RI)
== SuperUnits.end()) {
break;
}
}
if (RI == RE)
return true;
}
return false;
}
// Verify that overlapping groups have a common supergroup.
void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
continue;
RecVec CheckUnits =
PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
for (unsigned j = i+1; j < e; ++j) {
if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
continue;
RecVec OtherUnits =
PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
OtherUnits.begin(), OtherUnits.end())
!= CheckUnits.end()) {
// CheckUnits and OtherUnits overlap
OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(),
CheckUnits.end());
if (!hasSuperGroup(OtherUnits, PM)) {
PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
"proc resource group overlaps with "
+ PM.ProcResourceDefs[j]->getName()
+ " but no supergroup contains both.");
}
}
}
}
}
// Collect and sort WriteRes, ReadAdvance, and ProcResources.
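//
// Subtarget .td files declare these with records such as (hypothetical names):
//   def : WriteRes<WriteALU, [UnitALU]> { let Latency = 1; }
//   def : ReadAdvance<ReadMul, 2>;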
void CodeGenSchedModels::collectProcResources() {
// Add any subtarget-specific SchedReadWrites that are directly associated
// with processor resources. Refer to the parent SchedClass's ProcIndices to
// determine which processors they apply to.
for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
SCI != SCE; ++SCI) {
if (SCI->ItinClassDef)
collectItinProcResources(SCI->ItinClassDef);
else {
// This class may have a default ReadWrite list which can be overridden by
// InstRW definitions.
if (!SCI->InstRWs.empty()) {
for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
RWI != RWE; ++RWI) {
Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
IdxVec ProcIndices(1, getProcModel(RWModelDef).Index);
IdxVec Writes, Reads;
findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
Writes, Reads);
collectRWResources(Writes, Reads, ProcIndices);
}
}
collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices);
}
}
// Add resources separately defined by each subtarget.
RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) {
Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
addWriteRes(*WRI, getProcModel(ModelDef).Index);
}
RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
for (RecIter WRI = SWRDefs.begin(), WRE = SWRDefs.end(); WRI != WRE; ++WRI) {
Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
addWriteRes(*WRI, getProcModel(ModelDef).Index);
}
RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) {
Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
addReadAdvance(*RAI, getProcModel(ModelDef).Index);
}
RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
for (RecIter RAI = SRADefs.begin(), RAE = SRADefs.end(); RAI != RAE; ++RAI) {
if ((*RAI)->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
addReadAdvance(*RAI, getProcModel(ModelDef).Index);
}
}
// Add ProcResGroups that are defined within this processor model, which may
// not be directly referenced but may directly specify a buffer size.
RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
RI != RE; ++RI) {
if (!(*RI)->getValueInit("SchedModel")->isComplete())
continue;
CodeGenProcModel &PM = getProcModel((*RI)->getValueAsDef("SchedModel"));
RecIter I = std::find(PM.ProcResourceDefs.begin(),
PM.ProcResourceDefs.end(), *RI);
if (I == PM.ProcResourceDefs.end())
PM.ProcResourceDefs.push_back(*RI);
}
// Finalize each ProcModel by sorting the record arrays.
for (CodeGenProcModel &PM : ProcModels) {
std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(),
LessRecord());
std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(),
LessRecord());
std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
LessRecord());
DEBUG(
PM.dump();
dbgs() << "WriteResDefs: ";
for (RecIter RI = PM.WriteResDefs.begin(),
RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
if ((*RI)->isSubClassOf("WriteRes"))
dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
else
dbgs() << (*RI)->getName() << " ";
}
dbgs() << "\nReadAdvanceDefs: ";
for (RecIter RI = PM.ReadAdvanceDefs.begin(),
RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
if ((*RI)->isSubClassOf("ReadAdvance"))
dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
else
dbgs() << (*RI)->getName() << " ";
}
dbgs() << "\nProcResourceDefs: ";
for (RecIter RI = PM.ProcResourceDefs.begin(),
RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
dbgs() << (*RI)->getName() << " ";
}
dbgs() << '\n');
verifyProcResourceGroups(PM);
}
}
// Collect itinerary class resources for each processor.
void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
const CodeGenProcModel &PM = ProcModels[PIdx];
// For all ItinRW entries.
bool HasMatch = false;
for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
II != IE; ++II) {
RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
continue;
if (HasMatch)
PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
+ ItinClassDef->getName()
+ " in ItinResources for " + PM.ModelName);
HasMatch = true;
IdxVec Writes, Reads;
findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
IdxVec ProcIndices(1, PIdx);
collectRWResources(Writes, Reads, ProcIndices);
}
}
}
void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
const IdxVec &ProcIndices) {
const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
if (SchedRW.TheDef) {
if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
PI != PE; ++PI) {
addWriteRes(SchedRW.TheDef, *PI);
}
}
else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
PI != PE; ++PI) {
addReadAdvance(SchedRW.TheDef, *PI);
}
}
}
for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
AI != AE; ++AI) {
IdxVec AliasProcIndices;
if ((*AI)->getValueInit("SchedModel")->isComplete()) {
AliasProcIndices.push_back(
getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
}
else
AliasProcIndices = ProcIndices;
const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
IdxVec ExpandedRWs;
expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
SI != SE; ++SI) {
collectRWResources(*SI, IsRead, AliasProcIndices);
}
}
}
// Collect resources for a set of read/write types and processor indices.
void CodeGenSchedModels::collectRWResources(const IdxVec &Writes,
const IdxVec &Reads,
const IdxVec &ProcIndices) {
for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
collectRWResources(*WI, /*IsRead=*/false, ProcIndices);
for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
collectRWResources(*RI, /*IsRead=*/true, ProcIndices);
}
// Find the processor's resource units for this kind of resource.
Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
const CodeGenProcModel &PM) const {
if (ProcResKind->isSubClassOf("ProcResourceUnits"))
return ProcResKind;
Record *ProcUnitDef = nullptr;
RecVec ProcResourceDefs =
Records.getAllDerivedDefinitions("ProcResourceUnits");
for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end();
RI != RE; ++RI) {
if ((*RI)->getValueAsDef("Kind") == ProcResKind
&& (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
if (ProcUnitDef) {
PrintFatalError((*RI)->getLoc(),
"Multiple ProcessorResourceUnits associated with "
+ ProcResKind->getName());
}
ProcUnitDef = *RI;
}
}
RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
RI != RE; ++RI) {
if (*RI == ProcResKind
&& (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
if (ProcUnitDef) {
PrintFatalError((*RI)->getLoc(),
"Multiple ProcessorResourceUnits associated with "
+ ProcResKind->getName());
}
ProcUnitDef = *RI;
}
}
if (!ProcUnitDef) {
PrintFatalError(ProcResKind->getLoc(),
"No ProcessorResources associated with "
+ ProcResKind->getName());
}
return ProcUnitDef;
}
// Iteratively add a resource and its super resources.
void CodeGenSchedModels::addProcResource(Record *ProcResKind,
CodeGenProcModel &PM) {
for (;;) {
Record *ProcResUnits = findProcResUnits(ProcResKind, PM);
// See if this ProcResource is already associated with this processor.
RecIter I = std::find(PM.ProcResourceDefs.begin(),
PM.ProcResourceDefs.end(), ProcResUnits);
if (I != PM.ProcResourceDefs.end())
return;
PM.ProcResourceDefs.push_back(ProcResUnits);
if (ProcResUnits->isSubClassOf("ProcResGroup"))
return;
if (!ProcResUnits->getValueInit("Super")->isComplete())
return;
ProcResKind = ProcResUnits->getValueAsDef("Super");
}
}
// Add resources for a SchedWrite to this processor if they don't exist.
void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
assert(PIdx && "don't add resources to an invalid Processor model");
RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef);
if (WRI != WRDefs.end())
return;
WRDefs.push_back(ProcWriteResDef);
// Visit ProcResourceKinds referenced by the newly discovered WriteRes.
RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
WritePRI != WritePRE; ++WritePRI) {
addProcResource(*WritePRI, ProcModels[PIdx]);
}
}
// Add resources for a ReadAdvance to this processor if they don't exist.
void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
unsigned PIdx) {
RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef);
if (I != RADefs.end())
return;
RADefs.push_back(ProcReadAdvanceDef);
}
unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(),
PRDef);
if (PRPos == ProcResourceDefs.end())
PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
"the ProcResources list for " + ModelName);
// Idx=0 is reserved for invalid.
return 1 + (PRPos - ProcResourceDefs.begin());
}
#ifndef NDEBUG
void CodeGenProcModel::dump() const {
dbgs() << Index << ": " << ModelName << " "
<< (ModelDef ? ModelDef->getName() : "inferred") << " "
<< (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
}
void CodeGenSchedRW::dump() const {
dbgs() << Name << (IsVariadic ? " (V) " : " ");
if (IsSequence) {
dbgs() << "(";
dumpIdxVec(Sequence);
dbgs() << ")";
}
}
void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
<< " Writes: ";
for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
SchedModels->getSchedWrite(Writes[i]).dump();
if (i < N-1) {
dbgs() << '\n';
dbgs().indent(10);
}
}
dbgs() << "\n Reads: ";
for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
SchedModels->getSchedRead(Reads[i]).dump();
if (i < N-1) {
dbgs() << '\n';
dbgs().indent(10);
}
}
dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
if (!Transitions.empty()) {
dbgs() << "\n Transitions for Proc ";
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = Transitions.begin(), TE = Transitions.end(); TI != TE; ++TI) {
dumpIdxVec(TI->ProcIndices);
}
}
}
void PredTransitions::dump() const {
dbgs() << "Expanded Variants:\n";
for (std::vector<PredTransition>::const_iterator
TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
dbgs() << "{";
for (SmallVectorImpl<PredCheck>::const_iterator
PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
PCI != PCE; ++PCI) {
if (PCI != TI->PredTerm.begin())
dbgs() << ", ";
dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
<< ":" << PCI->Predicate->getName();
}
dbgs() << "},\n => {";
for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
WSI != WSE; ++WSI) {
dbgs() << "(";
for (SmallVectorImpl<unsigned>::const_iterator
WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
if (WI != WSI->begin())
dbgs() << ", ";
dbgs() << SchedModels.getSchedWrite(*WI).Name;
}
dbgs() << "),";
}
dbgs() << "}\n";
}
}
#endif // NDEBUG
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/SequenceToOffsetTable.h | //===-- SequenceToOffsetTable.h - Compress similar sequences ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// SequenceToOffsetTable can be used to emit a number of null-terminated
// sequences as one big array. Use the same memory when a sequence is a suffix
// of another.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_SEQUENCETOOFFSETTABLE_H
#define LLVM_UTILS_TABLEGEN_SEQUENCETOOFFSETTABLE_H
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <functional>
#include <map>
#include <vector>
namespace llvm {
/// SequenceToOffsetTable - Collect a number of terminated sequences of T.
/// Compute the layout of a table that contains all the sequences, possibly by
/// reusing entries.
///
/// @tparam SeqT The sequence container (vector or string).
/// @tparam Less A stable comparator for SeqT elements.
template<typename SeqT, typename Less = std::less<typename SeqT::value_type> >
class SequenceToOffsetTable {
typedef typename SeqT::value_type ElemT;
// Define a comparator for SeqT that sorts a suffix immediately before a
// sequence with that suffix.
struct SeqLess {
Less L;
bool operator()(const SeqT &A, const SeqT &B) const {
return std::lexicographical_compare(A.rbegin(), A.rend(),
B.rbegin(), B.rend(), L);
}
};
// Keep sequences ordered according to SeqLess so suffixes are easy to find.
// Map each sequence to its offset in the table.
typedef std::map<SeqT, unsigned, SeqLess> SeqMap;
// Sequences added so far, with suffixes removed.
SeqMap Seqs;
// Entries in the final table, or 0 before layout was called.
unsigned Entries;
// isSuffix - Returns true if A is a suffix of B.
static bool isSuffix(const SeqT &A, const SeqT &B) {
return A.size() <= B.size() && std::equal(A.rbegin(), A.rend(), B.rbegin());
}
public:
SequenceToOffsetTable() : Entries(0) {}
/// add - Add a sequence to the table.
/// This must be called before layout().
void add(const SeqT &Seq) {
assert(Entries == 0 && "Cannot call add() after layout()");
typename SeqMap::iterator I = Seqs.lower_bound(Seq);
// If SeqMap contains a sequence that has Seq as a suffix, I will be
// pointing to it.
if (I != Seqs.end() && isSuffix(Seq, I->first))
return;
I = Seqs.insert(I, std::make_pair(Seq, 0u));
// The entry before I may be a suffix of Seq that can now be erased.
if (I != Seqs.begin() && isSuffix((--I)->first, Seq))
Seqs.erase(I);
}
bool empty() const { return Seqs.empty(); }
unsigned size() const {
assert(Entries && "Call layout() before size()");
return Entries;
}
/// layout - Computes the final table layout.
void layout() {
assert(Entries == 0 && "Can only call layout() once");
// Lay out the table in Seqs iteration order.
for (typename SeqMap::iterator I = Seqs.begin(), E = Seqs.end(); I != E;
++I) {
I->second = Entries;
// Include space for a terminator.
Entries += I->first.size() + 1;
}
}
/// get - Returns the offset of Seq in the final table.
unsigned get(const SeqT &Seq) const {
assert(Entries && "Call layout() before get()");
typename SeqMap::const_iterator I = Seqs.lower_bound(Seq);
assert(I != Seqs.end() && isSuffix(Seq, I->first) &&
"get() called with sequence that wasn't added first");
return I->second + (I->first.size() - Seq.size());
}
/// emit - Print out the table as the body of an array initializer.
/// Use the Print function to print elements.
void emit(raw_ostream &OS,
void (*Print)(raw_ostream&, ElemT),
const char *Term = "0") const {
assert(Entries && "Call layout() before emit()");
for (typename SeqMap::const_iterator I = Seqs.begin(), E = Seqs.end();
I != E; ++I) {
OS << " /* " << I->second << " */ ";
for (typename SeqT::const_iterator SI = I->first.begin(),
SE = I->first.end(); SI != SE; ++SI) {
Print(OS, *SI);
OS << ", ";
}
OS << Term << ",\n";
}
}
};
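// A minimal usage sketch (illustrative only, not part of this header): a
// backend typically adds every sequence first, calls layout() once, and then
// uses get()/emit() while printing the table. The stream name `OS` below is
// an assumed raw_ostream, and the strings are hypothetical.
//
//   SequenceToOffsetTable<std::string> StrTable;
//   StrTable.add("add");           // becomes a shared suffix of "fadd"
//   StrTable.add("fadd");
//   StrTable.layout();
//   unsigned Off = StrTable.get("add");  // offset of "add" within "fadd"
//   StrTable.emit(OS, printChar);        // body of a char array initializer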
// Helper function for SequenceToOffsetTable<string>.
static inline void printChar(raw_ostream &OS, char C) {
unsigned char UC(C);
if (isalnum(UC) || ispunct(UC)) {
OS << '\'';
if (C == '\\' || C == '\'')
OS << '\\';
OS << C << '\'';
} else {
OS << unsigned(UC);
}
}
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenMapTable.cpp | //===- CodeGenMapTable.cpp - Instruction Mapping Table Generator ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// CodeGenMapTable provides functionality for TableGen to create
// relation mapping between instructions. Relation models are defined using
// InstrMapping as a base class. This file implements the functionality which
// parses these definitions and generates relation maps using the information
// specified there. These maps are emitted as tables in the XXXGenInstrInfo.inc
// file along with the functions to query them.
//
// A relationship model to relate non-predicate instructions with their
// predicated true/false forms can be defined as follows:
//
// def getPredOpcode : InstrMapping {
// let FilterClass = "PredRel";
// let RowFields = ["BaseOpcode"];
// let ColFields = ["PredSense"];
// let KeyCol = ["none"];
// let ValueCols = [["true"], ["false"]]; }
//
// CodeGenMapTable parses this map and generates a table in the
// XXXGenInstrInfo.inc file that contains the instructions modeling this
// relationship. The table is queried through the generated function
// "int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)",
// which returns the predicated form of an instruction given its opcode value
// and the desired predicate sense (true/false).
//
// Short description of the algorithm:
//
// 1) Iterate through all the records that derive from "InstrMapping" class.
// 2) For each record, filter out instructions based on the FilterClass value.
// 3) Iterate through this set of instructions and insert them into
// RowInstrMap map based on their RowFields values. RowInstrMap is keyed by the
// vector of RowFields values and contains vectors of Records (instructions) as
// values. RowFields is a list of fields that are required to have the same
// values for all the instructions appearing in the same row of the relation
// table. All the instructions in a given row of the relation table have some
// sort of relationship with the key instruction defined by the corresponding
// relationship model.
//
// Ex: RowInstrMap(RowVal1, RowVal2, ...) -> [Instr1, Instr2, Instr3, ... ]
// Here Instr1, Instr2, Instr3 have same values (RowVal1, RowVal2) for
// RowFields. These groups of instructions are later matched against ValueCols
// to determine the column they belong to, if any.
//
// While building the RowInstrMap map, collect all the key instructions in
// KeyInstrVec. These are the instructions having the same values as KeyCol
// for all the fields listed in ColFields.
//
// For Example:
//
// Relate non-predicate instructions with their predicated true/false forms.
//
// def getPredOpcode : InstrMapping {
// let FilterClass = "PredRel";
// let RowFields = ["BaseOpcode"];
// let ColFields = ["PredSense"];
// let KeyCol = ["none"];
// let ValueCols = [["true"], ["false"]]; }
//
// Here, only instructions that have "none" as PredSense will be selected as key
// instructions.
//
// 4) For each key instruction, get the group of instructions that share the
// same key-value as the key instruction from RowInstrMap. Iterate over the list
// of columns in ValueCols (it is defined as a list<list<string> >. Therefore,
// it can specify multi-column relationships). For each column, find the
// instruction from the group that matches all the values for the column.
// Multiple matches are not allowed.
//
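// As a rough sketch of the emitted output for the getPredOpcode example above
// (the instruction names XXX::ADD, XXX::ADD_pt and XXX::ADD_pf are
// hypothetical), XXXGenInstrInfo.inc would contain something like:
//
//   static const uint16_t getPredOpcodeTable[][3] = {
//     { XXX::ADD, XXX::ADD_pt, XXX::ADD_pf },
//     ...
//   };
//
//   int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense);
//
// and a target would call getPredOpcode(XXX::ADD, PredSense_true) to map an
// unpredicated opcode to its predicated-true form.
//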
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "llvm/Support/Format.h"
#include "llvm/TableGen/Error.h"
using namespace llvm;
typedef std::map<std::string, std::vector<Record*> > InstrRelMapTy;
typedef std::map<std::vector<Init*>, std::vector<Record*> > RowInstrMapTy;
namespace {
//===----------------------------------------------------------------------===//
// This class is used to represent InstrMapping class defined in Target.td file.
class InstrMap {
private:
std::string Name;
std::string FilterClass;
ListInit *RowFields;
ListInit *ColFields;
ListInit *KeyCol;
std::vector<ListInit*> ValueCols;
public:
InstrMap(Record* MapRec) {
Name = MapRec->getName();
// FilterClass - It's used to reduce the search space only to the
// instructions that define the kind of relationship modeled by
// this InstrMapping object/record.
const RecordVal *Filter = MapRec->getValue("FilterClass");
FilterClass = Filter->getValue()->getAsUnquotedString();
// List of fields/attributes that need to be same across all the
// instructions in a row of the relation table.
RowFields = MapRec->getValueAsListInit("RowFields");
// List of fields/attributes that are constant across all the instruction
// in a column of the relation table. Ex: ColFields = 'predSense'
ColFields = MapRec->getValueAsListInit("ColFields");
// Values for the fields/attributes listed in 'ColFields'.
// Ex: KeyCol = 'noPred' -- key instruction is non-predicated
KeyCol = MapRec->getValueAsListInit("KeyCol");
// List of values for the fields/attributes listed in 'ColFields', one for
// each column in the relation table.
//
    // Ex: ValueCols = [['true'],['false']] -- it results in two columns in the
    // table. The first column requires all the instructions to have predSense
    // set to 'true' and the second column requires it to be 'false'.
ListInit *ColValList = MapRec->getValueAsListInit("ValueCols");
// Each instruction map must specify at least one column for it to be valid.
if (ColValList->empty())
PrintFatalError(MapRec->getLoc(), "InstrMapping record `" +
MapRec->getName() + "' has empty " + "`ValueCols' field!");
for (Init *I : ColValList->getValues()) {
ListInit *ColI = dyn_cast<ListInit>(I);
// Make sure that all the sub-lists in 'ValueCols' have same number of
// elements as the fields in 'ColFields'.
if (ColI->size() != ColFields->size())
PrintFatalError(MapRec->getLoc(), "Record `" + MapRec->getName() +
"', field `ValueCols' entries don't match with " +
" the entries in 'ColFields'!");
ValueCols.push_back(ColI);
}
}
std::string getName() const {
return Name;
}
std::string getFilterClass() {
return FilterClass;
}
ListInit *getRowFields() const {
return RowFields;
}
ListInit *getColFields() const {
return ColFields;
}
ListInit *getKeyCol() const {
return KeyCol;
}
const std::vector<ListInit*> &getValueCols() const {
return ValueCols;
}
};
} // End anonymous namespace.
//===----------------------------------------------------------------------===//
// class MapTableEmitter : It builds the instruction relation maps using
// the information provided in InstrMapping records. It outputs these
// relationship maps as tables into XXXGenInstrInfo.inc file along with the
// functions to query them.
namespace {
class MapTableEmitter {
private:
// std::string TargetName;
const CodeGenTarget &Target;
// InstrMapDesc - InstrMapping record to be processed.
InstrMap InstrMapDesc;
// InstrDefs - list of instructions filtered using FilterClass defined
// in InstrMapDesc.
std::vector<Record*> InstrDefs;
// RowInstrMap - maps RowFields values to the instructions. It's keyed by the
// values of the row fields and contains vector of records as values.
RowInstrMapTy RowInstrMap;
// KeyInstrVec - list of key instructions.
std::vector<Record*> KeyInstrVec;
DenseMap<Record*, std::vector<Record*> > MapTable;
public:
MapTableEmitter(CodeGenTarget &Target, RecordKeeper &Records, Record *IMRec):
Target(Target), InstrMapDesc(IMRec) {
const std::string FilterClass = InstrMapDesc.getFilterClass();
InstrDefs = Records.getAllDerivedDefinitions(FilterClass);
}
void buildRowInstrMap();
// Returns true if an instruction is a key instruction, i.e., its ColFields
// have same values as KeyCol.
bool isKeyColInstr(Record* CurInstr);
// Find column instruction corresponding to a key instruction based on the
// constraints for that column.
Record *getInstrForColumn(Record *KeyInstr, ListInit *CurValueCol);
// Find column instructions for each key instruction based
// on ValueCols and store them into MapTable.
void buildMapTable();
void emitBinSearch(raw_ostream &OS, unsigned TableSize);
void emitTablesWithFunc(raw_ostream &OS);
unsigned emitBinSearchTable(raw_ostream &OS);
// Lookup functions to query binary search tables.
void emitMapFuncBody(raw_ostream &OS, unsigned TableSize);
};
} // End anonymous namespace.
//===----------------------------------------------------------------------===//
// Process all the instructions that model this relation (already present in
// InstrDefs) and insert them into RowInstrMap which is keyed by the values of
// the fields listed as RowFields. It stores vectors of records as values.
// All the related instructions have the same values for the RowFields thus are
// part of the same key-value pair.
//===----------------------------------------------------------------------===//
void MapTableEmitter::buildRowInstrMap() {
for (Record *CurInstr : InstrDefs) {
std::vector<Init*> KeyValue;
ListInit *RowFields = InstrMapDesc.getRowFields();
for (Init *RowField : RowFields->getValues()) {
Init *CurInstrVal = CurInstr->getValue(RowField)->getValue();
KeyValue.push_back(CurInstrVal);
}
// Collect key instructions into KeyInstrVec. Later, these instructions are
// processed to assign column position to the instructions sharing
// their KeyValue in RowInstrMap.
if (isKeyColInstr(CurInstr))
KeyInstrVec.push_back(CurInstr);
RowInstrMap[KeyValue].push_back(CurInstr);
}
}
//===----------------------------------------------------------------------===//
// Return true if an instruction is a KeyCol instruction.
//===----------------------------------------------------------------------===//
bool MapTableEmitter::isKeyColInstr(Record* CurInstr) {
ListInit *ColFields = InstrMapDesc.getColFields();
ListInit *KeyCol = InstrMapDesc.getKeyCol();
// Check if the instruction is a KeyCol instruction.
bool MatchFound = true;
for (unsigned j = 0, endCF = ColFields->size();
(j < endCF) && MatchFound; j++) {
RecordVal *ColFieldName = CurInstr->getValue(ColFields->getElement(j));
std::string CurInstrVal = ColFieldName->getValue()->getAsUnquotedString();
std::string KeyColValue = KeyCol->getElement(j)->getAsUnquotedString();
MatchFound = (CurInstrVal == KeyColValue);
}
return MatchFound;
}
//===----------------------------------------------------------------------===//
// Build a map to link key instructions with the column instructions arranged
// according to their column positions.
//===----------------------------------------------------------------------===//
void MapTableEmitter::buildMapTable() {
// Find column instructions for a given key based on the ColField
// constraints.
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
unsigned NumOfCols = ValueCols.size();
for (Record *CurKeyInstr : KeyInstrVec) {
std::vector<Record*> ColInstrVec(NumOfCols);
// Find the column instruction based on the constraints for the column.
for (unsigned ColIdx = 0; ColIdx < NumOfCols; ColIdx++) {
ListInit *CurValueCol = ValueCols[ColIdx];
Record *ColInstr = getInstrForColumn(CurKeyInstr, CurValueCol);
ColInstrVec[ColIdx] = ColInstr;
}
MapTable[CurKeyInstr] = ColInstrVec;
}
}
//===----------------------------------------------------------------------===//
// Find column instruction based on the constraints for that column.
//===----------------------------------------------------------------------===//
Record *MapTableEmitter::getInstrForColumn(Record *KeyInstr,
ListInit *CurValueCol) {
ListInit *RowFields = InstrMapDesc.getRowFields();
std::vector<Init*> KeyValue;
// Construct KeyValue using KeyInstr's values for RowFields.
for (Init *RowField : RowFields->getValues()) {
Init *KeyInstrVal = KeyInstr->getValue(RowField)->getValue();
KeyValue.push_back(KeyInstrVal);
}
// Get all the instructions that share the same KeyValue as the KeyInstr
// in RowInstrMap. We search through these instructions to find a match
// for the current column, i.e., the instruction which has the same values
// as CurValueCol for all the fields in ColFields.
const std::vector<Record*> &RelatedInstrVec = RowInstrMap[KeyValue];
ListInit *ColFields = InstrMapDesc.getColFields();
Record *MatchInstr = nullptr;
for (unsigned i = 0, e = RelatedInstrVec.size(); i < e; i++) {
bool MatchFound = true;
Record *CurInstr = RelatedInstrVec[i];
for (unsigned j = 0, endCF = ColFields->size();
(j < endCF) && MatchFound; j++) {
Init *ColFieldJ = ColFields->getElement(j);
Init *CurInstrInit = CurInstr->getValue(ColFieldJ)->getValue();
std::string CurInstrVal = CurInstrInit->getAsUnquotedString();
      Init *ColFieldJValue = CurValueCol->getElement(j);
      MatchFound = (CurInstrVal == ColFieldJValue->getAsUnquotedString());
}
if (MatchFound) {
if (MatchInstr) // Already had a match
// Error if multiple matches are found for a column.
PrintFatalError("Multiple matches found for `" + KeyInstr->getName() +
"', for the relation `" + InstrMapDesc.getName());
MatchInstr = CurInstr;
}
}
return MatchInstr;
}
//===----------------------------------------------------------------------===//
// Emit one table per relation. Only instructions with a valid relation of a
// given type are included in the table sorted by their enum values (opcodes).
// Binary search is used for locating instructions in the table.
//===----------------------------------------------------------------------===//
unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
std::string TargetName = Target.getName();
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
unsigned NumCol = ValueCols.size();
unsigned TotalNumInstr = NumberedInstructions.size();
unsigned TableSize = 0;
OS << "static const uint16_t "<<InstrMapDesc.getName();
  // The number of columns in the table is NumCol+1 because the key
  // instructions are emitted as the first column.
OS << "Table[]["<< NumCol+1 << "] = {\n";
for (unsigned i = 0; i < TotalNumInstr; i++) {
Record *CurInstr = NumberedInstructions[i]->TheDef;
std::vector<Record*> ColInstrs = MapTable[CurInstr];
std::string OutStr("");
unsigned RelExists = 0;
if (!ColInstrs.empty()) {
for (unsigned j = 0; j < NumCol; j++) {
if (ColInstrs[j] != nullptr) {
RelExists = 1;
OutStr += ", ";
OutStr += TargetName;
OutStr += "::";
OutStr += ColInstrs[j]->getName();
} else { OutStr += ", (uint16_t)-1U";}
}
if (RelExists) {
OS << " { " << TargetName << "::" << CurInstr->getName();
OS << OutStr <<" },\n";
TableSize++;
}
}
}
if (!TableSize) {
OS << " { " << TargetName << "::" << "INSTRUCTION_LIST_END, ";
OS << TargetName << "::" << "INSTRUCTION_LIST_END }";
}
OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n";
return TableSize;
}
//===----------------------------------------------------------------------===//
// Emit binary search algorithm as part of the functions used to query
// relation tables.
//===----------------------------------------------------------------------===//
void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) {
OS << " unsigned mid;\n";
OS << " unsigned start = 0;\n";
OS << " unsigned end = " << TableSize << ";\n";
OS << " while (start < end) {\n";
OS << " mid = start + (end - start)/2;\n";
OS << " if (Opcode == " << InstrMapDesc.getName() << "Table[mid][0]) {\n";
OS << " break;\n";
OS << " }\n";
OS << " if (Opcode < " << InstrMapDesc.getName() << "Table[mid][0])\n";
OS << " end = mid;\n";
OS << " else\n";
OS << " start = mid + 1;\n";
OS << " }\n";
OS << " if (start == end)\n";
OS << " return -1; // Instruction doesn't exist in this table.\n\n";
}
//===----------------------------------------------------------------------===//
// Emit functions to query relation tables.
//===----------------------------------------------------------------------===//
void MapTableEmitter::emitMapFuncBody(raw_ostream &OS,
unsigned TableSize) {
ListInit *ColFields = InstrMapDesc.getColFields();
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
// Emit binary search algorithm to locate instructions in the
// relation table. If found, return opcode value from the appropriate column
// of the table.
emitBinSearch(OS, TableSize);
if (ValueCols.size() > 1) {
for (unsigned i = 0, e = ValueCols.size(); i < e; i++) {
ListInit *ColumnI = ValueCols[i];
for (unsigned j = 0, ColSize = ColumnI->size(); j < ColSize; ++j) {
std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
OS << " if (in" << ColName;
OS << " == ";
OS << ColName << "_" << ColumnI->getElement(j)->getAsUnquotedString();
if (j < ColumnI->size() - 1) OS << " && ";
else OS << ")\n";
}
OS << " return " << InstrMapDesc.getName();
OS << "Table[mid]["<<i+1<<"];\n";
}
OS << " return -1;";
}
else
OS << " return " << InstrMapDesc.getName() << "Table[mid][1];\n";
OS <<"}\n\n";
}
//===----------------------------------------------------------------------===//
// Emit relation tables and the functions to query them.
//===----------------------------------------------------------------------===//
void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) {
  // Emit the function name and the input parameters: primarily the opcode
  // value of the current instruction. However, if a table has multiple value
  // columns (more than 2 columns total, since the first column holds the key
  // instructions), we also need another input to indicate which column to
  // select.
ListInit *ColFields = InstrMapDesc.getColFields();
const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
OS << "// "<< InstrMapDesc.getName() << "\n";
OS << "int "<< InstrMapDesc.getName() << "(uint16_t Opcode";
  if (ValueCols.size() > 1) {
    for (Init *CF : ColFields->getValues()) {
      std::string ColName = CF->getAsUnquotedString();
      OS << ", enum " << ColName << " in" << ColName;
    }
  }
  // Close the parameter list once, regardless of how many ColFields exist.
  OS << ") {\n";
// Emit map table.
unsigned TableSize = emitBinSearchTable(OS);
// Emit rest of the function body.
emitMapFuncBody(OS, TableSize);
}
//===----------------------------------------------------------------------===//
// Emit enums for the column fields across all the instruction maps.
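//
// For the getPredOpcode example in the file header, with ColFields =
// ["PredSense"] and ValueCols = [["true"], ["false"]], this emits roughly
// (a sketch; only ValueCols values appear, the KeyCol value does not):
//
//   enum PredSense {
//     PredSense_true,
//     PredSense_false
//   };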
//===----------------------------------------------------------------------===//
static void emitEnums(raw_ostream &OS, RecordKeeper &Records) {
std::vector<Record*> InstrMapVec;
InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
std::map<std::string, std::vector<Init*> > ColFieldValueMap;
// Iterate over all InstrMapping records and create a map between column
// fields and their possible values across all records.
for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
Record *CurMap = InstrMapVec[i];
ListInit *ColFields;
ColFields = CurMap->getValueAsListInit("ColFields");
ListInit *List = CurMap->getValueAsListInit("ValueCols");
std::vector<ListInit*> ValueCols;
unsigned ListSize = List->size();
for (unsigned j = 0; j < ListSize; j++) {
ListInit *ListJ = dyn_cast<ListInit>(List->getElement(j));
if (ListJ->size() != ColFields->size())
PrintFatalError("Record `" + CurMap->getName() + "', field "
"`ValueCols' entries don't match with the entries in 'ColFields' !");
ValueCols.push_back(ListJ);
}
for (unsigned j = 0, endCF = ColFields->size(); j < endCF; j++) {
for (unsigned k = 0; k < ListSize; k++){
std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
ColFieldValueMap[ColName].push_back((ValueCols[k])->getElement(j));
}
}
}
for (std::map<std::string, std::vector<Init*> >::iterator
II = ColFieldValueMap.begin(), IE = ColFieldValueMap.end();
II != IE; II++) {
std::vector<Init*> FieldValues = (*II).second;
// Delete duplicate entries from ColFieldValueMap
for (unsigned i = 0; i < FieldValues.size() - 1; i++) {
Init *CurVal = FieldValues[i];
for (unsigned j = i+1; j < FieldValues.size(); j++) {
        if (CurVal == FieldValues[j]) {
          FieldValues.erase(FieldValues.begin()+j);
          --j; // Re-check this index; erase() shifted later elements left.
        }
}
}
// Emit enumerated values for the column fields.
OS << "enum " << (*II).first << " {\n";
for (unsigned i = 0, endFV = FieldValues.size(); i < endFV; i++) {
OS << "\t" << (*II).first << "_" << FieldValues[i]->getAsUnquotedString();
if (i != endFV - 1)
OS << ",\n";
else
OS << "\n};\n\n";
}
}
}
namespace llvm {
//===----------------------------------------------------------------------===//
// Parse 'InstrMapping' records and use the information to form relationship
// between instructions. These relations are emitted as a tables along with the
// functions to query them.
//===----------------------------------------------------------------------===//
void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
CodeGenTarget Target(Records);
std::string TargetName = Target.getName();
std::vector<Record*> InstrMapVec;
InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
if (InstrMapVec.empty())
return;
OS << "#ifdef GET_INSTRMAP_INFO\n";
OS << "#undef GET_INSTRMAP_INFO\n";
OS << "namespace llvm {\n\n";
OS << "namespace " << TargetName << " {\n\n";
  // Emit column field names and their values as enums.
emitEnums(OS, Records);
// Iterate over all instruction mapping records and construct relationship
// maps based on the information specified there.
//
for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
MapTableEmitter IMap(Target, Records, InstrMapVec[i]);
// Build RowInstrMap to group instructions based on their values for
// RowFields. In the process, also collect key instructions into
// KeyInstrVec.
IMap.buildRowInstrMap();
// Build MapTable to map key instructions with the corresponding column
// instructions.
IMap.buildMapTable();
// Emit map tables and the functions to query them.
IMap.emitTablesWithFunc(OS);
}
OS << "} // End " << TargetName << " namespace\n";
OS << "} // End llvm namespace\n";
OS << "#endif // GET_INSTRMAP_INFO\n\n";
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/AsmWriterInst.h | //===- AsmWriterInst.h - Classes encapsulating a printable inst -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes implement a parser for assembly strings. The parser splits
// the string into operands, which can be literal strings (the constant bits of
// the string), actual operands (i.e., operands from the MachineInstr), and
// dynamically-generated text, specified by raw C++ code.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_ASMWRITERINST_H
#define LLVM_UTILS_TABLEGEN_ASMWRITERINST_H
#include <string>
#include <vector>
namespace llvm {
class CodeGenInstruction;
class Record;
struct AsmWriterOperand {
enum OpType {
// Output this text surrounded by quotes to the asm.
isLiteralTextOperand,
// This is the name of a routine to call to print the operand.
isMachineInstrOperand,
// Output this text verbatim to the asm writer. It is code that
// will output some text to the asm.
isLiteralStatementOperand
} OperandType;
/// Str - For isLiteralTextOperand, this IS the literal text. For
  /// isMachineInstrOperand, this is the PrinterMethodName for the operand.
/// For isLiteralStatementOperand, this is the code to insert verbatim
/// into the asm writer.
std::string Str;
/// CGIOpNo - For isMachineInstrOperand, this is the index of the operand in
/// the CodeGenInstruction.
unsigned CGIOpNo;
  /// MIOpNo - For isMachineInstrOperand, this is the operand number of the
/// machine instruction.
unsigned MIOpNo;
/// MiModifier - For isMachineInstrOperand, this is the modifier string for
/// an operand, specified with syntax like ${opname:modifier}.
std::string MiModifier;
// PassSubtarget - Pass MCSubtargetInfo to the print method if this is
// equal to 1.
// FIXME: Remove after all ports are updated.
unsigned PassSubtarget;
// To make VS STL happy
AsmWriterOperand(OpType op = isLiteralTextOperand):OperandType(op) {}
AsmWriterOperand(const std::string &LitStr,
OpType op = isLiteralTextOperand)
: OperandType(op), Str(LitStr) {}
AsmWriterOperand(const std::string &Printer,
unsigned _CGIOpNo,
unsigned _MIOpNo,
const std::string &Modifier,
unsigned PassSubtarget,
OpType op = isMachineInstrOperand)
: OperandType(op), Str(Printer), CGIOpNo(_CGIOpNo), MIOpNo(_MIOpNo),
MiModifier(Modifier), PassSubtarget(PassSubtarget) {}
bool operator!=(const AsmWriterOperand &Other) const {
if (OperandType != Other.OperandType || Str != Other.Str) return true;
if (OperandType == isMachineInstrOperand)
return MIOpNo != Other.MIOpNo || MiModifier != Other.MiModifier;
return false;
}
bool operator==(const AsmWriterOperand &Other) const {
return !operator!=(Other);
}
/// getCode - Return the code that prints this operand.
std::string getCode() const;
};
class AsmWriterInst {
public:
std::vector<AsmWriterOperand> Operands;
const CodeGenInstruction *CGI;
AsmWriterInst(const CodeGenInstruction &CGI,
unsigned Variant, unsigned PassSubtarget);
/// MatchesAllButOneOp - If this instruction is exactly identical to the
/// specified instruction except for one differing operand, return the
/// differing operand number. Otherwise return ~0.
unsigned MatchesAllButOneOp(const AsmWriterInst &Other) const;
private:
void AddLiteralString(const std::string &Str) {
// If the last operand was already a literal text string, append this to
// it, otherwise add a new operand.
if (!Operands.empty() &&
Operands.back().OperandType == AsmWriterOperand::isLiteralTextOperand)
Operands.back().Str.append(Str);
else
Operands.push_back(AsmWriterOperand(Str));
}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenTarget.cpp | //===- CodeGenTarget.cpp - CodeGen Target Class Wrapper -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class wraps target description classes used by the various code
// generation TableGen backends. This makes it easier to access the data and
// provides a single place that needs to check it for validity. All of these
// classes abort on error conditions.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "CodeGenIntrinsics.h"
#include "CodeGenSchedule.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <algorithm>
using namespace llvm;
static cl::opt<unsigned>
AsmParserNum("asmparsernum", cl::init(0),
cl::desc("Make -gen-asm-parser emit assembly parser #N"));
static cl::opt<unsigned>
AsmWriterNum("asmwriternum", cl::init(0),
cl::desc("Make -gen-asm-writer emit assembly writer #N"));
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
/// record corresponds to.
MVT::SimpleValueType llvm::getValueType(Record *Rec) {
return (MVT::SimpleValueType)Rec->getValueAsInt("Value");
}
std::string llvm::getName(MVT::SimpleValueType T) {
switch (T) {
case MVT::Other: return "UNKNOWN";
case MVT::iPTR: return "TLI.getPointerTy()";
case MVT::iPTRAny: return "TLI.getPointerTy()";
default: return getEnumName(T);
}
}
std::string llvm::getEnumName(MVT::SimpleValueType T) {
switch (T) {
case MVT::Other: return "MVT::Other";
case MVT::i1: return "MVT::i1";
case MVT::i8: return "MVT::i8";
case MVT::i16: return "MVT::i16";
case MVT::i32: return "MVT::i32";
case MVT::i64: return "MVT::i64";
case MVT::i128: return "MVT::i128";
case MVT::Any: return "MVT::Any";
case MVT::iAny: return "MVT::iAny";
case MVT::fAny: return "MVT::fAny";
case MVT::vAny: return "MVT::vAny";
case MVT::f16: return "MVT::f16";
case MVT::f32: return "MVT::f32";
case MVT::f64: return "MVT::f64";
case MVT::f80: return "MVT::f80";
case MVT::f128: return "MVT::f128";
case MVT::ppcf128: return "MVT::ppcf128";
case MVT::x86mmx: return "MVT::x86mmx";
case MVT::Glue: return "MVT::Glue";
case MVT::isVoid: return "MVT::isVoid";
case MVT::v2i1: return "MVT::v2i1";
case MVT::v4i1: return "MVT::v4i1";
case MVT::v8i1: return "MVT::v8i1";
case MVT::v16i1: return "MVT::v16i1";
case MVT::v32i1: return "MVT::v32i1";
case MVT::v64i1: return "MVT::v64i1";
case MVT::v1i8: return "MVT::v1i8";
case MVT::v2i8: return "MVT::v2i8";
case MVT::v4i8: return "MVT::v4i8";
case MVT::v8i8: return "MVT::v8i8";
case MVT::v16i8: return "MVT::v16i8";
case MVT::v32i8: return "MVT::v32i8";
case MVT::v64i8: return "MVT::v64i8";
case MVT::v1i16: return "MVT::v1i16";
case MVT::v2i16: return "MVT::v2i16";
case MVT::v4i16: return "MVT::v4i16";
case MVT::v8i16: return "MVT::v8i16";
case MVT::v16i16: return "MVT::v16i16";
case MVT::v32i16: return "MVT::v32i16";
case MVT::v1i32: return "MVT::v1i32";
case MVT::v2i32: return "MVT::v2i32";
case MVT::v4i32: return "MVT::v4i32";
case MVT::v8i32: return "MVT::v8i32";
case MVT::v16i32: return "MVT::v16i32";
case MVT::v1i64: return "MVT::v1i64";
case MVT::v2i64: return "MVT::v2i64";
case MVT::v4i64: return "MVT::v4i64";
case MVT::v8i64: return "MVT::v8i64";
case MVT::v16i64: return "MVT::v16i64";
case MVT::v1i128: return "MVT::v1i128";
case MVT::v2f16: return "MVT::v2f16";
case MVT::v4f16: return "MVT::v4f16";
case MVT::v8f16: return "MVT::v8f16";
case MVT::v1f32: return "MVT::v1f32";
case MVT::v2f32: return "MVT::v2f32";
case MVT::v4f32: return "MVT::v4f32";
case MVT::v8f32: return "MVT::v8f32";
case MVT::v16f32: return "MVT::v16f32";
case MVT::v1f64: return "MVT::v1f64";
case MVT::v2f64: return "MVT::v2f64";
case MVT::v4f64: return "MVT::v4f64";
case MVT::v8f64: return "MVT::v8f64";
case MVT::Metadata: return "MVT::Metadata";
case MVT::iPTR: return "MVT::iPTR";
case MVT::iPTRAny: return "MVT::iPTRAny";
case MVT::Untyped: return "MVT::Untyped";
default: llvm_unreachable("ILLEGAL VALUE TYPE!");
}
}
/// getQualifiedName - Return the name of the specified record, with a
/// namespace qualifier if the record contains one.
///
std::string llvm::getQualifiedName(const Record *R) {
std::string Namespace;
if (R->getValue("Namespace"))
Namespace = R->getValueAsString("Namespace");
if (Namespace.empty()) return R->getName();
return Namespace + "::" + R->getName();
}
/// getTarget - Return the current instance of the Target class.
///
CodeGenTarget::CodeGenTarget(RecordKeeper &records)
: Records(records) {
std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
if (Targets.size() == 0)
PrintFatalError("ERROR: No 'Target' subclasses defined!");
if (Targets.size() != 1)
PrintFatalError("ERROR: Multiple subclasses of Target defined!");
TargetRec = Targets[0];
}
CodeGenTarget::~CodeGenTarget() {
}
const std::string &CodeGenTarget::getName() const {
return TargetRec->getName();
}
std::string CodeGenTarget::getInstNamespace() const {
for (const CodeGenInstruction *Inst : instructions()) {
// Make sure not to pick up "TargetOpcode" by accidentally getting
// the namespace off the PHI instruction or something.
if (Inst->Namespace != "TargetOpcode")
return Inst->Namespace;
}
return "";
}
Record *CodeGenTarget::getInstructionSet() const {
return TargetRec->getValueAsDef("InstructionSet");
}
/// getAsmParser - Return the AssemblyParser definition for this target.
///
Record *CodeGenTarget::getAsmParser() const {
std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyParsers");
if (AsmParserNum >= LI.size())
PrintFatalError("Target does not have an AsmParser #" +
Twine(AsmParserNum) + "!");
return LI[AsmParserNum];
}
/// getAsmParserVariant - Return the AssemblyParserVariant definition for
/// this target.
///
Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
std::vector<Record*> LI =
TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
if (i >= LI.size())
PrintFatalError("Target does not have an AsmParserVariant #" + Twine(i) +
"!");
return LI[i];
}
/// getAsmParserVariantCount - Return the number of AssemblyParserVariant
/// definitions available for this target.
///
unsigned CodeGenTarget::getAsmParserVariantCount() const {
std::vector<Record*> LI =
TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
return LI.size();
}
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
Record *CodeGenTarget::getAsmWriter() const {
std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyWriters");
if (AsmWriterNum >= LI.size())
PrintFatalError("Target does not have an AsmWriter #" +
Twine(AsmWriterNum) + "!");
return LI[AsmWriterNum];
}
CodeGenRegBank &CodeGenTarget::getRegBank() const {
if (!RegBank)
RegBank = llvm::make_unique<CodeGenRegBank>(Records);
return *RegBank;
}
void CodeGenTarget::ReadRegAltNameIndices() const {
RegAltNameIndices = Records.getAllDerivedDefinitions("RegAltNameIndex");
std::sort(RegAltNameIndices.begin(), RegAltNameIndices.end(), LessRecord());
}
/// getRegisterByName - If there is a register with the specific AsmName,
/// return it.
const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const {
const StringMap<CodeGenRegister*> &Regs = getRegBank().getRegistersByName();
StringMap<CodeGenRegister*>::const_iterator I = Regs.find(Name);
if (I == Regs.end())
return nullptr;
return I->second;
}
std::vector<MVT::SimpleValueType> CodeGenTarget::
getRegisterVTs(Record *R) const {
const CodeGenRegister *Reg = getRegBank().getReg(R);
std::vector<MVT::SimpleValueType> Result;
for (const auto &RC : getRegBank().getRegClasses()) {
if (RC.contains(Reg)) {
ArrayRef<MVT::SimpleValueType> InVTs = RC.getValueTypes();
Result.insert(Result.end(), InVTs.begin(), InVTs.end());
}
}
// Remove duplicates.
array_pod_sort(Result.begin(), Result.end());
Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
return Result;
}
void CodeGenTarget::ReadLegalValueTypes() const {
for (const auto &RC : getRegBank().getRegClasses())
LegalValueTypes.insert(LegalValueTypes.end(), RC.VTs.begin(), RC.VTs.end());
// Remove duplicates.
std::sort(LegalValueTypes.begin(), LegalValueTypes.end());
LegalValueTypes.erase(std::unique(LegalValueTypes.begin(),
LegalValueTypes.end()),
LegalValueTypes.end());
}
CodeGenSchedModels &CodeGenTarget::getSchedModels() const {
if (!SchedModels)
SchedModels = llvm::make_unique<CodeGenSchedModels>(Records, *this);
return *SchedModels;
}
void CodeGenTarget::ReadInstructions() const {
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
if (Insts.size() <= 2)
PrintFatalError("No 'Instruction' subclasses defined!");
// Parse the instructions defined in the .td file.
for (unsigned i = 0, e = Insts.size(); i != e; ++i)
Instructions[Insts[i]] = llvm::make_unique<CodeGenInstruction>(Insts[i]);
}
static const CodeGenInstruction *
GetInstByName(const char *Name,
const DenseMap<const Record*,
std::unique_ptr<CodeGenInstruction>> &Insts,
RecordKeeper &Records) {
const Record *Rec = Records.getDef(Name);
const auto I = Insts.find(Rec);
if (!Rec || I == Insts.end())
PrintFatalError(Twine("Could not find '") + Name + "' instruction!");
return I->second.get();
}
/// \brief Return all of the instructions defined by the target, ordered by
/// their enum value.
void CodeGenTarget::ComputeInstrsByEnum() const {
// The ordering here must match the ordering in TargetOpcodes.h.
static const char *const FixedInstrs[] = {
"PHI", "INLINEASM", "CFI_INSTRUCTION", "EH_LABEL",
"GC_LABEL", "KILL", "EXTRACT_SUBREG", "INSERT_SUBREG",
"IMPLICIT_DEF", "SUBREG_TO_REG", "COPY_TO_REGCLASS", "DBG_VALUE",
"REG_SEQUENCE", "COPY", "BUNDLE", "LIFETIME_START",
"LIFETIME_END", "STACKMAP", "PATCHPOINT", "LOAD_STACK_GUARD",
"STATEPOINT", "LOCAL_ESCAPE", "FAULTING_LOAD_OP",
nullptr};
const auto &Insts = getInstructions();
for (const char *const *p = FixedInstrs; *p; ++p) {
const CodeGenInstruction *Instr = GetInstByName(*p, Insts, Records);
assert(Instr && "Missing target independent instruction");
assert(Instr->Namespace == "TargetOpcode" && "Bad namespace");
InstrsByEnum.push_back(Instr);
}
unsigned EndOfPredefines = InstrsByEnum.size();
for (const auto &I : Insts) {
const CodeGenInstruction *CGI = I.second.get();
if (CGI->Namespace != "TargetOpcode")
InstrsByEnum.push_back(CGI);
}
assert(InstrsByEnum.size() == Insts.size() && "Missing predefined instr");
// All of the instructions are now in random order based on the map iteration.
// Sort them by name.
std::sort(InstrsByEnum.begin() + EndOfPredefines, InstrsByEnum.end(),
[](const CodeGenInstruction *Rec1, const CodeGenInstruction *Rec2) {
return Rec1->TheDef->getName() < Rec2->TheDef->getName();
});
}
/// isLittleEndianEncoding - Return whether this target encodes its instructions
/// in little-endian format, i.e. bits laid out in the order [0..n].
///
bool CodeGenTarget::isLittleEndianEncoding() const {
return getInstructionSet()->getValueAsBit("isLittleEndianEncoding");
}
/// reverseBitsForLittleEndianEncoding - For little-endian instruction bit
/// encodings, reverse the bit order of all instructions.
void CodeGenTarget::reverseBitsForLittleEndianEncoding() {
if (!isLittleEndianEncoding())
return;
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
for (Record *R : Insts) {
if (R->getValueAsString("Namespace") == "TargetOpcode" ||
R->getValueAsBit("isPseudo"))
continue;
BitsInit *BI = R->getValueAsBitsInit("Inst");
unsigned numBits = BI->getNumBits();
SmallVector<Init *, 16> NewBits(numBits);
for (unsigned bit = 0, end = numBits / 2; bit != end; ++bit) {
unsigned bitSwapIdx = numBits - bit - 1;
Init *OrigBit = BI->getBit(bit);
Init *BitSwap = BI->getBit(bitSwapIdx);
NewBits[bit] = BitSwap;
NewBits[bitSwapIdx] = OrigBit;
}
if (numBits % 2) {
      // For an odd number of bits, the untouched middle bit is at numBits / 2.
      unsigned middle = numBits / 2;
      NewBits[middle] = BI->getBit(middle);
}
BitsInit *NewBI = BitsInit::get(NewBits);
// Update the bits in reversed order so that emitInstrOpBits will get the
// correct endianness.
R->getValue("Inst")->setValue(NewBI);
}
}
/// guessInstructionProperties - Return true if it's OK to guess instruction
/// properties instead of raising an error.
///
/// This is configurable as a temporary migration aid. It will eventually be
/// permanently false.
bool CodeGenTarget::guessInstructionProperties() const {
return getInstructionSet()->getValueAsBit("guessInstructionProperties");
}
//===----------------------------------------------------------------------===//
// ComplexPattern implementation
//
ComplexPattern::ComplexPattern(Record *R) {
Ty = ::getValueType(R->getValueAsDef("Ty"));
NumOperands = R->getValueAsInt("NumOperands");
SelectFunc = R->getValueAsString("SelectFunc");
RootNodes = R->getValueAsListOfDefs("RootNodes");
// Parse the properties.
Properties = 0;
std::vector<Record*> PropList = R->getValueAsListOfDefs("Properties");
for (unsigned i = 0, e = PropList.size(); i != e; ++i)
if (PropList[i]->getName() == "SDNPHasChain") {
Properties |= 1 << SDNPHasChain;
} else if (PropList[i]->getName() == "SDNPOptInGlue") {
Properties |= 1 << SDNPOptInGlue;
} else if (PropList[i]->getName() == "SDNPMayStore") {
Properties |= 1 << SDNPMayStore;
} else if (PropList[i]->getName() == "SDNPMayLoad") {
Properties |= 1 << SDNPMayLoad;
} else if (PropList[i]->getName() == "SDNPSideEffect") {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
} else if (PropList[i]->getName() == "SDNPVariadic") {
Properties |= 1 << SDNPVariadic;
} else if (PropList[i]->getName() == "SDNPWantRoot") {
Properties |= 1 << SDNPWantRoot;
} else if (PropList[i]->getName() == "SDNPWantParent") {
Properties |= 1 << SDNPWantParent;
} else {
PrintFatalError("Unsupported SD Node property '" +
PropList[i]->getName() + "' on ComplexPattern '" +
R->getName() + "'!");
}
}
//===----------------------------------------------------------------------===//
// CodeGenIntrinsic Implementation
//===----------------------------------------------------------------------===//
std::vector<CodeGenIntrinsic> llvm::LoadIntrinsics(const RecordKeeper &RC,
bool TargetOnly) {
std::vector<Record*> I = RC.getAllDerivedDefinitions("Intrinsic");
std::vector<CodeGenIntrinsic> Result;
for (unsigned i = 0, e = I.size(); i != e; ++i) {
bool isTarget = I[i]->getValueAsBit("isTarget");
if (isTarget == TargetOnly)
Result.push_back(CodeGenIntrinsic(I[i]));
}
return Result;
}
CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
TheDef = R;
std::string DefName = R->getName();
ModRef = ReadWriteMem;
isOverloaded = false;
isCommutative = false;
canThrow = false;
isNoReturn = false;
isNoDuplicate = false;
isConvergent = false;
if (DefName.size() <= 4 ||
std::string(DefName.begin(), DefName.begin() + 4) != "int_")
PrintFatalError("Intrinsic '" + DefName + "' does not start with 'int_'!");
EnumName = std::string(DefName.begin()+4, DefName.end());
if (R->getValue("GCCBuiltinName")) // Ignore a missing GCCBuiltinName field.
GCCBuiltinName = R->getValueAsString("GCCBuiltinName");
if (R->getValue("MSBuiltinName")) // Ignore a missing MSBuiltinName field.
MSBuiltinName = R->getValueAsString("MSBuiltinName");
TargetPrefix = R->getValueAsString("TargetPrefix");
Name = R->getValueAsString("LLVMName");
if (Name == "") {
// If an explicit name isn't specified, derive one from the DefName.
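    // e.g. "int_memcpy" becomes "llvm.memcpy"; underscores turn into dots.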
Name = "llvm.";
for (unsigned i = 0, e = EnumName.size(); i != e; ++i)
Name += (EnumName[i] == '_') ? '.' : EnumName[i];
} else {
// Verify it starts with "llvm.".
if (Name.size() <= 5 ||
std::string(Name.begin(), Name.begin() + 5) != "llvm.")
PrintFatalError("Intrinsic '" + DefName + "'s name does not start with 'llvm.'!");
}
// If TargetPrefix is specified, make sure that Name starts with
// "llvm.<targetprefix>.".
if (!TargetPrefix.empty()) {
if (Name.size() < 6+TargetPrefix.size() ||
std::string(Name.begin() + 5, Name.begin() + 6 + TargetPrefix.size())
!= (TargetPrefix + "."))
PrintFatalError("Intrinsic '" + DefName + "' does not start with 'llvm." +
TargetPrefix + ".'!");
}
// Parse the list of return types.
std::vector<MVT::SimpleValueType> OverloadedVTs;
ListInit *TypeList = R->getValueAsListInit("RetTypes");
for (unsigned i = 0, e = TypeList->size(); i != e; ++i) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
MVT::SimpleValueType VT;
if (TyEl->isSubClassOf("LLVMMatchType")) {
unsigned MatchTy = TyEl->getValueAsInt("Number");
assert(MatchTy < OverloadedVTs.size() &&
"Invalid matching number!");
VT = OverloadedVTs[MatchTy];
// It only makes sense to use the extended and truncated vector element
// variants with iAny types; otherwise, if the intrinsic is not
// overloaded, all the types can be specified directly.
assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
!TyEl->isSubClassOf("LLVMTruncatedType")) ||
VT == MVT::iAny || VT == MVT::vAny) &&
"Expected iAny or vAny type");
} else {
VT = getValueType(TyEl->getValueAsDef("VT"));
}
if (MVT(VT).isOverloaded()) {
OverloadedVTs.push_back(VT);
isOverloaded = true;
}
// Reject invalid types.
if (VT == MVT::isVoid)
PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
IS.RetVTs.push_back(VT);
IS.RetTypeDefs.push_back(TyEl);
}
// Parse the list of parameter types.
TypeList = R->getValueAsListInit("ParamTypes");
for (unsigned i = 0, e = TypeList->size(); i != e; ++i) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
MVT::SimpleValueType VT;
if (TyEl->isSubClassOf("LLVMMatchType")) {
unsigned MatchTy = TyEl->getValueAsInt("Number");
assert(MatchTy < OverloadedVTs.size() &&
"Invalid matching number!");
VT = OverloadedVTs[MatchTy];
// It only makes sense to use the extended and truncated vector element
// variants with iAny types; otherwise, if the intrinsic is not
// overloaded, all the types can be specified directly.
assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
!TyEl->isSubClassOf("LLVMTruncatedType") &&
!TyEl->isSubClassOf("LLVMVectorSameWidth") &&
!TyEl->isSubClassOf("LLVMPointerToElt")) ||
VT == MVT::iAny || VT == MVT::vAny) &&
"Expected iAny or vAny type");
} else
VT = getValueType(TyEl->getValueAsDef("VT"));
if (MVT(VT).isOverloaded()) {
OverloadedVTs.push_back(VT);
isOverloaded = true;
}
// Reject invalid types.
if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/)
PrintFatalError("Intrinsic '" + DefName + " has void in result type list!");
IS.ParamVTs.push_back(VT);
IS.ParamTypeDefs.push_back(TyEl);
}
// Parse the intrinsic properties.
ListInit *PropList = R->getValueAsListInit("Properties");
for (unsigned i = 0, e = PropList->size(); i != e; ++i) {
Record *Property = PropList->getElementAsRecord(i);
assert(Property->isSubClassOf("IntrinsicProperty") &&
"Expected a property!");
if (Property->getName() == "IntrNoMem")
ModRef = NoMem;
else if (Property->getName() == "IntrReadArgMem")
ModRef = ReadArgMem;
else if (Property->getName() == "IntrReadMem")
ModRef = ReadMem;
else if (Property->getName() == "IntrReadWriteArgMem")
ModRef = ReadWriteArgMem;
else if (Property->getName() == "Commutative")
isCommutative = true;
else if (Property->getName() == "Throws")
canThrow = true;
else if (Property->getName() == "IntrNoDuplicate")
isNoDuplicate = true;
else if (Property->getName() == "IntrConvergent")
isConvergent = true;
else if (Property->getName() == "IntrNoReturn")
isNoReturn = true;
else if (Property->isSubClassOf("NoCapture")) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, NoCapture));
} else if (Property->isSubClassOf("ReadOnly")) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, ReadOnly));
} else if (Property->isSubClassOf("ReadNone")) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, ReadNone));
} else
llvm_unreachable("Unknown property!");
}
// Sort the argument attributes for later benefit.
std::sort(ArgumentAttributes.begin(), ArgumentAttributes.end());
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/TableGenBackends.h | //===- TableGenBackends.h - Declarations for LLVM TableGen Backends -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations for all of the LLVM TableGen
// backends. A "TableGen backend" is just a function. See below for a
// precise description.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
#define LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
// A TableGen backend is a function that looks like
//
// EmitFoo(RecordKeeper &RK, raw_ostream &OS /*, anything else you need */ )
//
// What you do inside of that function is up to you, but it will usually
// involve generating C++ code to the provided raw_ostream.
//
// The RecordKeeper is just a top-level container for an in-memory
// representation of the data encoded in the TableGen file. What a TableGen
// backend does is walk around that in-memory representation and generate
// stuff based on the information it contains.
//
// The in-memory representation is a node-graph (think of it like JSON but
// with a richer ontology of types), where the nodes are subclasses of
// Record. The methods `getClass`, `getDef` are the basic interface to
// access the node-graph. RecordKeeper also provides a handy method
// `getAllDerivedDefinitions`. Consult "include/llvm/TableGen/Record.h" for
// the exact interfaces provided by Record and RecordKeeper.
//
// A common pattern for TableGen backends is for the EmitFoo function to
// instantiate a class which holds some context for the generation process,
// and then have most of the work happen in that class's methods. This
// pattern partly has historical roots in the previous TableGen backend API
// that involved a class and an invocation like `FooEmitter(RK).run(OS)`.
//
// Remember to wrap private things in an anonymous namespace. For most
// backends, this means that the EmitFoo function is the only thing not in
// the anonymous namespace.
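//
// For illustration only, a minimal backend might look like the following
// sketch. The 'Widget' class, its 'Priority' field, and the EmitWidgetTable
// function are hypothetical placeholders, not names from this repository:
//
//   #include "llvm/Support/raw_ostream.h"
//   #include "llvm/TableGen/Record.h"
//
//   namespace llvm {
//   void EmitWidgetTable(RecordKeeper &RK, raw_ostream &OS) {
//     // Walk every def derived from the 'Widget' class and emit one table
//     // entry per record.
//     for (Record *R : RK.getAllDerivedDefinitions("Widget"))
//       OS << "{ \"" << R->getName() << "\", "
//          << R->getValueAsInt("Priority") << " },\n";
//   }
//   } // End llvm namespace
//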
// FIXME: Reorganize TableGen so that build dependencies can be more
// accurately expressed. Currently, touching any of the emitters (or
// anything that they transitively depend on) causes everything dependent
// on TableGen to be rebuilt (this includes all the targets!). Perhaps have
// a standalone TableGen binary and have the backends be loadable modules
// of some sort; then the dependency could be expressed as being on the
// module, and all the modules would have a common dependency on the
// TableGen binary with as few dependencies as possible on the rest of
// LLVM.
namespace llvm {
class raw_ostream;
class RecordKeeper;
void EmitIntrinsics(RecordKeeper &RK, raw_ostream &OS, bool TargetOnly = false);
void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS);
void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS);
void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS);
void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS);
void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS);
void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS);
void EmitDisassembler(RecordKeeper &RK, raw_ostream &OS);
void EmitFastISel(RecordKeeper &RK, raw_ostream &OS);
void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS);
void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS);
void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS);
void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS);
void EmitMapTable(RecordKeeper &RK, raw_ostream &OS);
void EmitOptParser(RecordKeeper &RK, raw_ostream &OS);
void EmitCTags(RecordKeeper &RK, raw_ostream &OS);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelEmitter.cpp | //===- DAGISelEmitter.cpp - Generate an instruction selector --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits a DAG instruction selector.
//
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
#include "DAGISelMatcher.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
#define DEBUG_TYPE "dag-isel-emitter"
namespace {
/// DAGISelEmitter - The top-level class which coordinates construction
/// and emission of the instruction selector.
class DAGISelEmitter {
CodeGenDAGPatterns CGP;
public:
explicit DAGISelEmitter(RecordKeeper &R) : CGP(R) {}
void run(raw_ostream &OS);
};
} // End anonymous namespace
//===----------------------------------------------------------------------===//
// DAGISelEmitter Helper methods
//
/// getResultPatternCost - Compute the number of instructions for this pattern.
/// This is a temporary hack. We should really include the instruction
/// latencies in this calculation.
static unsigned getResultPatternCost(TreePatternNode *P,
CodeGenDAGPatterns &CGP) {
if (P->isLeaf()) return 0;
unsigned Cost = 0;
Record *Op = P->getOperator();
if (Op->isSubClassOf("Instruction")) {
Cost++;
CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
if (II.usesCustomInserter)
Cost += 10;
}
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
Cost += getResultPatternCost(P->getChild(i), CGP);
return Cost;
}
/// getResultPatternCodeSize - Compute the code size of instructions for this
/// pattern.
static unsigned getResultPatternSize(TreePatternNode *P,
CodeGenDAGPatterns &CGP) {
if (P->isLeaf()) return 0;
unsigned Cost = 0;
Record *Op = P->getOperator();
if (Op->isSubClassOf("Instruction")) {
Cost += Op->getValueAsInt("CodeSize");
}
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
Cost += getResultPatternSize(P->getChild(i), CGP);
return Cost;
}
namespace {
// PatternSortingPredicate - return true if we prefer to match LHS before RHS.
// In particular, we want to match maximal patterns first and, within a
// particular complexity, the lowest-cost pattern first.
struct PatternSortingPredicate {
PatternSortingPredicate(CodeGenDAGPatterns &cgp) : CGP(cgp) {}
CodeGenDAGPatterns &CGP;
bool operator()(const PatternToMatch *LHS, const PatternToMatch *RHS) {
const TreePatternNode *LHSSrc = LHS->getSrcPattern();
const TreePatternNode *RHSSrc = RHS->getSrcPattern();
MVT LHSVT = (LHSSrc->getNumTypes() != 0 ? LHSSrc->getType(0) : MVT::Other);
MVT RHSVT = (RHSSrc->getNumTypes() != 0 ? RHSSrc->getType(0) : MVT::Other);
if (LHSVT.isVector() != RHSVT.isVector())
return RHSVT.isVector();
if (LHSVT.isFloatingPoint() != RHSVT.isFloatingPoint())
return RHSVT.isFloatingPoint();
    // Otherwise, if the patterns might both match, sort based on complexity,
    // which means that we prefer to match patterns that cover more nodes in the
    // input over patterns that cover fewer.
int LHSSize = LHS->getPatternComplexity(CGP);
int RHSSize = RHS->getPatternComplexity(CGP);
if (LHSSize > RHSSize) return true; // LHS -> bigger -> less cost
if (LHSSize < RHSSize) return false;
// If the patterns have equal complexity, compare generated instruction cost
unsigned LHSCost = getResultPatternCost(LHS->getDstPattern(), CGP);
unsigned RHSCost = getResultPatternCost(RHS->getDstPattern(), CGP);
if (LHSCost < RHSCost) return true;
if (LHSCost > RHSCost) return false;
unsigned LHSPatSize = getResultPatternSize(LHS->getDstPattern(), CGP);
unsigned RHSPatSize = getResultPatternSize(RHS->getDstPattern(), CGP);
if (LHSPatSize < RHSPatSize) return true;
if (LHSPatSize > RHSPatSize) return false;
// Sort based on the UID of the pattern, giving us a deterministic ordering
// if all other sorting conditions fail.
assert(LHS == RHS || LHS->ID != RHS->ID);
return LHS->ID < RHS->ID;
}
};
} // End anonymous namespace
void DAGISelEmitter::run(raw_ostream &OS) {
emitSourceFileHeader("DAG Instruction Selector for the " +
CGP.getTargetInfo().getName() + " target", OS);
OS << "// *** NOTE: This file is #included into the middle of the target\n"
<< "// *** instruction selector class. These functions are really "
<< "methods.\n\n";
DEBUG(errs() << "\n\nALL PATTERNS TO MATCH:\n\n";
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
E = CGP.ptm_end(); I != E; ++I) {
errs() << "PATTERN: "; I->getSrcPattern()->dump();
errs() << "\nRESULT: "; I->getDstPattern()->dump();
errs() << "\n";
});
// Add all the patterns to a temporary list so we can sort them.
std::vector<const PatternToMatch*> Patterns;
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(), E = CGP.ptm_end();
I != E; ++I)
Patterns.push_back(&*I);
// We want to process the matches in order of minimal cost. Sort the patterns
// so the least cost one is at the start.
std::sort(Patterns.begin(), Patterns.end(), PatternSortingPredicate(CGP));
// Convert each variant of each pattern into a Matcher.
std::vector<Matcher*> PatternMatchers;
for (unsigned i = 0, e = Patterns.size(); i != e; ++i) {
for (unsigned Variant = 0; ; ++Variant) {
if (Matcher *M = ConvertPatternToMatcher(*Patterns[i], Variant, CGP))
PatternMatchers.push_back(M);
else
break;
}
}
std::unique_ptr<Matcher> TheMatcher =
llvm::make_unique<ScopeMatcher>(PatternMatchers);
OptimizeMatcher(TheMatcher, CGP);
//Matcher->dump();
EmitMatcherTable(TheMatcher.get(), CGP, OS);
}
namespace llvm {
void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS) {
DAGISelEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86ModRMFilters.h | //===- X86ModRMFilters.h - Disassembler ModR/M filterss ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains ModR/M filters that determine which values of the ModR/M byte
// are valid for a particular instruction.
// Documentation for the disassembler emitter in general can be found in
// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_X86MODRMFILTERS_H
#define LLVM_UTILS_TABLEGEN_X86MODRMFILTERS_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
namespace X86Disassembler {
/// ModRMFilter - Abstract base class for classes that recognize patterns in
/// ModR/M bytes.
class ModRMFilter {
virtual void anchor();
public:
/// Destructor - Override as necessary.
virtual ~ModRMFilter() { }
/// isDumb - Indicates whether this filter returns the same value for
/// any value of the ModR/M byte.
///
/// @result - True if the filter returns the same value for any ModR/M
/// byte; false if not.
virtual bool isDumb() const { return false; }
/// accepts - Indicates whether the filter accepts a particular ModR/M
/// byte value.
///
/// @result - True if the filter accepts the ModR/M byte; false if not.
virtual bool accepts(uint8_t modRM) const = 0;
};
/// DumbFilter - Accepts any ModR/M byte. Used for instructions that do not
/// require a ModR/M byte or instructions where the entire ModR/M byte is used
/// for operands.
class DumbFilter : public ModRMFilter {
void anchor() override;
public:
bool isDumb() const override {
return true;
}
bool accepts(uint8_t modRM) const override {
return true;
}
};
/// ModFilter - Filters based on the mod bits [bits 7-6] of the ModR/M byte.
/// Some instructions are classified based on whether they are 11 or anything
/// else. This filter performs that classification.
class ModFilter : public ModRMFilter {
void anchor() override;
bool R;
public:
/// Constructor
///
/// \param r True if the mod bits of the ModR/M byte must be 11; false
/// otherwise. The name r derives from the fact that the mod
/// bits indicate whether the R/M bits [bits 2-0] signify a
/// register or a memory operand.
ModFilter(bool r) :
ModRMFilter(),
R(r) {
}
bool accepts(uint8_t modRM) const override {
return (R == ((modRM & 0xc0) == 0xc0));
}
};
/// ExtendedFilter - Extended opcodes are classified based on the value of the
/// mod field [bits 7-6] and the value of the nnn field [bits 5-3].
class ExtendedFilter : public ModRMFilter {
void anchor() override;
bool R;
uint8_t NNN;
public:
/// Constructor
///
/// \param r True if the mod field must be set to 11; false otherwise.
/// The name is explained at ModFilter.
/// \param nnn The required value of the nnn field.
ExtendedFilter(bool r, uint8_t nnn) :
ModRMFilter(),
R(r),
NNN(nnn) {
}
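  // For example (illustrative values only): with r=true and nnn=2, this
  // filter accepts the ModR/M byte 0xD1 (mod=0b11, nnn=0b010, rm=0b001) but
  // rejects 0x51, whose mod field is 0b01 even though its nnn field matches.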
bool accepts(uint8_t modRM) const override {
return (((R && ((modRM & 0xc0) == 0xc0)) ||
(!R && ((modRM & 0xc0) != 0xc0))) &&
(((modRM & 0x38) >> 3) == NNN));
}
};
/// ExactFilter - The occasional extended opcode (such as VMCALL or MONITOR)
/// requires the ModR/M byte to have a specific value.
class ExactFilter : public ModRMFilter {
void anchor() override;
uint8_t ModRM;
public:
/// Constructor
///
/// \param modRM The required value of the full ModR/M byte.
ExactFilter(uint8_t modRM) :
ModRMFilter(),
ModRM(modRM) {
}
bool accepts(uint8_t modRM) const override {
return (ModRM == modRM);
}
};
} // namespace X86Disassembler
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/PseudoLoweringEmitter.cpp | //===- PseudoLoweringEmitter.cpp - PseudoLowering Generator -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "pseudo-lowering"
namespace {
class PseudoLoweringEmitter {
struct OpData {
enum MapKind { Operand, Imm, Reg };
MapKind Kind;
union {
unsigned Operand; // Operand number mapped to.
      uint64_t Imm;       // Integer immediate value.
Record *Reg; // Physical register.
} Data;
};
struct PseudoExpansion {
CodeGenInstruction Source; // The source pseudo instruction definition.
CodeGenInstruction Dest; // The destination instruction to lower to.
IndexedMap<OpData> OperandMap;
PseudoExpansion(CodeGenInstruction &s, CodeGenInstruction &d,
IndexedMap<OpData> &m) :
Source(s), Dest(d), OperandMap(m) {}
};
RecordKeeper &Records;
  // It's overkill to have an instance of the full CodeGenTarget object,
  // but it loads everything on demand, not in the constructor, so it's
  // cheap to construct in practice and works out OK.
CodeGenTarget Target;
SmallVector<PseudoExpansion, 64> Expansions;
unsigned addDagOperandMapping(Record *Rec, DagInit *Dag,
CodeGenInstruction &Insn,
IndexedMap<OpData> &OperandMap,
unsigned BaseIdx);
void evaluateExpansion(Record *Pseudo);
void emitLoweringEmitter(raw_ostream &o);
public:
PseudoLoweringEmitter(RecordKeeper &R) : Records(R), Target(R) {}
/// run - Output the pseudo-lowerings.
void run(raw_ostream &o);
};
} // End anonymous namespace
// FIXME: This pass currently can only expand a pseudo to a single instruction.
// The pseudo expansion really should take a list of dags, not just
// a single dag, so we can do fancier things.
unsigned PseudoLoweringEmitter::
addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
IndexedMap<OpData> &OperandMap, unsigned BaseIdx) {
unsigned OpsAdded = 0;
for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
if (DefInit *DI = dyn_cast<DefInit>(Dag->getArg(i))) {
// Physical register reference. Explicit check for the special case
// "zero_reg" definition.
if (DI->getDef()->isSubClassOf("Register") ||
DI->getDef()->getName() == "zero_reg") {
OperandMap[BaseIdx + i].Kind = OpData::Reg;
OperandMap[BaseIdx + i].Data.Reg = DI->getDef();
++OpsAdded;
continue;
}
// Normal operands should always have the same type, or we have a
// problem.
// FIXME: We probably shouldn't ever get a non-zero BaseIdx here.
assert(BaseIdx == 0 && "Named subargument in pseudo expansion?!");
if (DI->getDef() != Insn.Operands[BaseIdx + i].Rec)
PrintFatalError(Rec->getLoc(),
"Pseudo operand type '" + DI->getDef()->getName() +
"' does not match expansion operand type '" +
Insn.Operands[BaseIdx + i].Rec->getName() + "'");
// Source operand maps to destination operand. The Data element
// will be filled in later, just set the Kind for now. Do it
// for each corresponding MachineInstr operand, not just the first.
for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
OperandMap[BaseIdx + i + I].Kind = OpData::Operand;
OpsAdded += Insn.Operands[i].MINumOperands;
} else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(i))) {
OperandMap[BaseIdx + i].Kind = OpData::Imm;
OperandMap[BaseIdx + i].Data.Imm = II->getValue();
++OpsAdded;
} else if (DagInit *SubDag = dyn_cast<DagInit>(Dag->getArg(i))) {
// Just add the operands recursively. This is almost certainly
// a constant value for a complex operand (> 1 MI operand).
unsigned NewOps =
addDagOperandMapping(Rec, SubDag, Insn, OperandMap, BaseIdx + i);
OpsAdded += NewOps;
// Since we added more than one, we also need to adjust the base.
BaseIdx += NewOps - 1;
} else
llvm_unreachable("Unhandled pseudo-expansion argument type!");
}
return OpsAdded;
}
void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
DEBUG(dbgs() << "Pseudo definition: " << Rec->getName() << "\n");
  // Validate that the result pattern has the correct number and types
// of arguments for the instruction it references.
DagInit *Dag = Rec->getValueAsDag("ResultInst");
assert(Dag && "Missing result instruction in pseudo expansion!");
DEBUG(dbgs() << " Result: " << *Dag << "\n");
DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef)
PrintFatalError(Rec->getLoc(), Rec->getName() +
" has unexpected operator type!");
Record *Operator = OpDef->getDef();
if (!Operator->isSubClassOf("Instruction"))
PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
"' is not an instruction!");
CodeGenInstruction Insn(Operator);
if (Insn.isCodeGenOnly || Insn.isPseudo)
PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
"' cannot be another pseudo instruction!");
if (Insn.Operands.size() != Dag->getNumArgs())
PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
"' operand count mismatch");
unsigned NumMIOperands = 0;
for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i)
NumMIOperands += Insn.Operands[i].MINumOperands;
IndexedMap<OpData> OperandMap;
OperandMap.grow(NumMIOperands);
addDagOperandMapping(Rec, Dag, Insn, OperandMap, 0);
  // If there are more operands that weren't in the DAG, they have to
  // be operands that have default values, or we have an error. Currently,
  // operands that are a subclass of OperandWithDefaultOp have default values.
// Validate that each result pattern argument has a matching (by name)
// argument in the source instruction, in either the (outs) or (ins) list.
// Also check that the type of the arguments match.
//
// Record the mapping of the source to result arguments for use by
// the lowering emitter.
CodeGenInstruction SourceInsn(Rec);
StringMap<unsigned> SourceOperands;
for (unsigned i = 0, e = SourceInsn.Operands.size(); i != e; ++i)
SourceOperands[SourceInsn.Operands[i].Name] = i;
DEBUG(dbgs() << " Operand mapping:\n");
for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i) {
// We've already handled constant values. Just map instruction operands
// here.
if (OperandMap[Insn.Operands[i].MIOperandNo].Kind != OpData::Operand)
continue;
StringMap<unsigned>::iterator SourceOp =
SourceOperands.find(Dag->getArgName(i));
if (SourceOp == SourceOperands.end())
PrintFatalError(Rec->getLoc(),
"Pseudo output operand '" + Dag->getArgName(i) +
"' has no matching source operand.");
// Map the source operand to the destination operand index for each
// MachineInstr operand.
for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
OperandMap[Insn.Operands[i].MIOperandNo + I].Data.Operand =
SourceOp->getValue();
DEBUG(dbgs() << " " << SourceOp->getValue() << " ==> " << i << "\n");
}
Expansions.push_back(PseudoExpansion(SourceInsn, Insn, OperandMap));
}
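// For reference, the code emitted below for a single pseudo expansion looks
// roughly like the following sketch (TargetNS, PSEUDO_FOO, and REAL_FOO are
// placeholder names, not real namespaces or instructions):
//
//   case TargetNS::PSEUDO_FOO: {
//     MCInst TmpInst;
//     MCOperand MCOp;
//     TmpInst.setOpcode(TargetNS::REAL_FOO);
//     // Operand: dst
//     lowerOperand(MI->getOperand(0), MCOp);
//     TmpInst.addOperand(MCOp);
//     // Operand: imm
//     TmpInst.addOperand(MCOperand::createImm(0));
//     EmitToStreamer(OutStreamer, TmpInst);
//     break;
//   }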
void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
// Emit file header.
emitSourceFileHeader("Pseudo-instruction MC lowering Source Fragment", o);
o << "bool " << Target.getName() + "AsmPrinter" << "::\n"
<< "emitPseudoExpansionLowering(MCStreamer &OutStreamer,\n"
<< " const MachineInstr *MI) {\n";
if (!Expansions.empty()) {
o << " switch (MI->getOpcode()) {\n"
<< " default: return false;\n";
for (auto &Expansion : Expansions) {
CodeGenInstruction &Source = Expansion.Source;
CodeGenInstruction &Dest = Expansion.Dest;
o << " case " << Source.Namespace << "::"
<< Source.TheDef->getName() << ": {\n"
<< " MCInst TmpInst;\n"
<< " MCOperand MCOp;\n"
<< " TmpInst.setOpcode(" << Dest.Namespace << "::"
<< Dest.TheDef->getName() << ");\n";
// Copy the operands from the source instruction.
      // FIXME: Instruction operands with default values (predicates and cc_out
      //        in ARM, for example) shouldn't need explicit values in the
      //        expansion DAG.
unsigned MIOpNo = 0;
for (const auto &DestOperand : Dest.Operands) {
o << " // Operand: " << DestOperand.Name << "\n";
for (unsigned i = 0, e = DestOperand.MINumOperands; i != e; ++i) {
switch (Expansion.OperandMap[MIOpNo + i].Kind) {
case OpData::Operand:
o << " lowerOperand(MI->getOperand("
<< Source.Operands[Expansion.OperandMap[MIOpNo].Data
.Operand].MIOperandNo + i
<< "), MCOp);\n"
<< " TmpInst.addOperand(MCOp);\n";
break;
case OpData::Imm:
o << " TmpInst.addOperand(MCOperand::createImm("
<< Expansion.OperandMap[MIOpNo + i].Data.Imm << "));\n";
break;
case OpData::Reg: {
Record *Reg = Expansion.OperandMap[MIOpNo + i].Data.Reg;
o << " TmpInst.addOperand(MCOperand::createReg(";
// "zero_reg" is special.
if (Reg->getName() == "zero_reg")
o << "0";
else
o << Reg->getValueAsString("Namespace") << "::"
<< Reg->getName();
o << "));\n";
break;
}
}
}
MIOpNo += DestOperand.MINumOperands;
}
if (Dest.Operands.isVariadic) {
MIOpNo = Source.Operands.size() + 1;
o << " // variable_ops\n";
o << " for (unsigned i = " << MIOpNo
<< ", e = MI->getNumOperands(); i != e; ++i)\n"
<< " if (lowerOperand(MI->getOperand(i), MCOp))\n"
<< " TmpInst.addOperand(MCOp);\n";
}
o << " EmitToStreamer(OutStreamer, TmpInst);\n"
<< " break;\n"
<< " }\n";
}
o << " }\n return true;";
} else
o << " return false;";
o << "\n}\n\n";
}
void PseudoLoweringEmitter::run(raw_ostream &o) {
Record *ExpansionClass = Records.getClass("PseudoInstExpansion");
Record *InstructionClass = Records.getClass("Instruction");
assert(ExpansionClass && "PseudoInstExpansion class definition missing!");
assert(InstructionClass && "Instruction class definition missing!");
std::vector<Record*> Insts;
for (const auto &D : Records.getDefs()) {
if (D.second->isSubClassOf(ExpansionClass) &&
D.second->isSubClassOf(InstructionClass))
Insts.push_back(D.second.get());
}
// Process the pseudo expansion definitions, validating them as we do so.
for (unsigned i = 0, e = Insts.size(); i != e; ++i)
evaluateExpansion(Insts[i]);
// Generate expansion code to lower the pseudo to an MCInst of the real
// instruction.
emitLoweringEmitter(o);
}
namespace llvm {
void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS) {
PseudoLoweringEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelMatcher.h | //===- DAGISelMatcher.h - Representation of DAG pattern matcher -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_DAGISELMATCHER_H
#define LLVM_UTILS_TABLEGEN_DAGISELMATCHER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/Casting.h"
namespace llvm {
struct CodeGenRegister;
class CodeGenDAGPatterns;
class Matcher;
class PatternToMatch;
class raw_ostream;
class ComplexPattern;
class Record;
class SDNodeInfo;
class TreePredicateFn;
class TreePattern;
Matcher *ConvertPatternToMatcher(const PatternToMatch &Pattern,unsigned Variant,
const CodeGenDAGPatterns &CGP);
void OptimizeMatcher(std::unique_ptr<Matcher> &Matcher,
const CodeGenDAGPatterns &CGP);
void EmitMatcherTable(const Matcher *Matcher, const CodeGenDAGPatterns &CGP,
raw_ostream &OS);
/// Matcher - Base class for all the DAG ISel Matcher representation
/// nodes.
class Matcher {
// The next matcher node that is executed after this one. Null if this is the
// last stage of a match.
std::unique_ptr<Matcher> Next;
virtual void anchor();
public:
enum KindTy {
// Matcher state manipulation.
Scope, // Push a checking scope.
RecordNode, // Record the current node.
RecordChild, // Record a child of the current node.
RecordMemRef, // Record the memref in the current node.
CaptureGlueInput, // If the current node has an input glue, save it.
MoveChild, // Move current node to specified child.
MoveParent, // Move current node to parent.
// Predicate checking.
CheckSame, // Fail if not same as prev match.
CheckChildSame, // Fail if child not same as prev match.
CheckPatternPredicate,
CheckPredicate, // Fail if node predicate fails.
CheckOpcode, // Fail if not opcode.
SwitchOpcode, // Dispatch based on opcode.
CheckType, // Fail if not correct type.
SwitchType, // Dispatch based on type.
CheckChildType, // Fail if child has wrong type.
CheckInteger, // Fail if wrong val.
CheckChildInteger, // Fail if child is wrong val.
CheckCondCode, // Fail if not condcode.
CheckValueType,
CheckComplexPat,
CheckAndImm,
CheckOrImm,
CheckFoldableChainNode,
    // Node creation/emission.
EmitInteger, // Create a TargetConstant
EmitStringInteger, // Create a TargetConstant from a string.
EmitRegister, // Create a register.
    EmitConvertToTarget,  // Convert an imm/fpimm to a target imm/fpimm.
    EmitMergeInputChains, // Merge together the chains for an input.
EmitCopyToReg, // Emit a copytoreg into a physreg.
EmitNode, // Create a DAG node
EmitNodeXForm, // Run a SDNodeXForm
MarkGlueResults, // Indicate which interior nodes have glue results.
CompleteMatch, // Finish a match and update the results.
MorphNodeTo // Build a node, finish a match and update results.
};
const KindTy Kind;
protected:
Matcher(KindTy K) : Kind(K) {}
public:
virtual ~Matcher() {}
KindTy getKind() const { return Kind; }
Matcher *getNext() { return Next.get(); }
const Matcher *getNext() const { return Next.get(); }
void setNext(Matcher *C) { Next.reset(C); }
Matcher *takeNext() { return Next.release(); }
std::unique_ptr<Matcher> &getNextPtr() { return Next; }
bool isEqual(const Matcher *M) const {
if (getKind() != M->getKind()) return false;
return isEqualImpl(M);
}
unsigned getHash() const {
// Clear the high bit so we don't conflict with tombstones etc.
return ((getHashImpl() << 4) ^ getKind()) & (~0U>>1);
}
/// isSafeToReorderWithPatternPredicate - Return true if it is safe to sink a
/// PatternPredicate node past this one.
virtual bool isSafeToReorderWithPatternPredicate() const {
return false;
}
/// isSimplePredicateNode - Return true if this is a simple predicate that
/// operates on the node or its children without potential side effects or a
/// change of the current node.
bool isSimplePredicateNode() const {
switch (getKind()) {
default: return false;
case CheckSame:
case CheckChildSame:
case CheckPatternPredicate:
case CheckPredicate:
case CheckOpcode:
case CheckType:
case CheckChildType:
case CheckInteger:
case CheckChildInteger:
case CheckCondCode:
case CheckValueType:
case CheckAndImm:
case CheckOrImm:
case CheckFoldableChainNode:
return true;
}
}
/// isSimplePredicateOrRecordNode - Return true if this is a record node or
/// a simple predicate.
bool isSimplePredicateOrRecordNode() const {
return isSimplePredicateNode() ||
getKind() == RecordNode || getKind() == RecordChild;
}
/// unlinkNode - Unlink the specified node from this chain. If Other == this,
/// we unlink the next pointer and return it. Otherwise we unlink Other from
/// the list and return this.
Matcher *unlinkNode(Matcher *Other);
/// canMoveBefore - Return true if this matcher is the same as Other, or if
/// we can move this matcher past all of the nodes in-between Other and this
/// node. Other must be equal to or before this.
bool canMoveBefore(const Matcher *Other) const;
/// canMoveBeforeNode - Return true if it is safe to move the current matcher
/// across the specified one.
bool canMoveBeforeNode(const Matcher *Other) const;
  /// isContradictory - Return true if these two matchers could never match on
/// the same node.
bool isContradictory(const Matcher *Other) const {
// Since this predicate is reflexive, we canonicalize the ordering so that
// we always match a node against nodes with kinds that are greater or equal
// to them. For example, we'll pass in a CheckType node as an argument to
// the CheckOpcode method, not the other way around.
if (getKind() < Other->getKind())
return isContradictoryImpl(Other);
return Other->isContradictoryImpl(this);
}
void print(raw_ostream &OS, unsigned indent = 0) const;
void printOne(raw_ostream &OS) const;
void dump() const;
protected:
virtual void printImpl(raw_ostream &OS, unsigned indent) const = 0;
virtual bool isEqualImpl(const Matcher *M) const = 0;
virtual unsigned getHashImpl() const = 0;
virtual bool isContradictoryImpl(const Matcher *M) const { return false; }
};
/// ScopeMatcher - This attempts to match each of its children to find the first
/// one that successfully matches. If one child fails, it tries the next child.
/// If none of the children match then this check fails. It never has a 'next'.
class ScopeMatcher : public Matcher {
SmallVector<Matcher*, 4> Children;
public:
ScopeMatcher(ArrayRef<Matcher *> children)
: Matcher(Scope), Children(children.begin(), children.end()) {
}
~ScopeMatcher() override;
unsigned getNumChildren() const { return Children.size(); }
Matcher *getChild(unsigned i) { return Children[i]; }
const Matcher *getChild(unsigned i) const { return Children[i]; }
void resetChild(unsigned i, Matcher *N) {
delete Children[i];
Children[i] = N;
}
Matcher *takeChild(unsigned i) {
Matcher *Res = Children[i];
Children[i] = nullptr;
return Res;
}
void setNumChildren(unsigned NC) {
if (NC < Children.size()) {
// delete any children we're about to lose pointers to.
for (unsigned i = NC, e = Children.size(); i != e; ++i)
delete Children[i];
}
Children.resize(NC);
}
static inline bool classof(const Matcher *N) {
return N->getKind() == Scope;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return false; }
unsigned getHashImpl() const override { return 12312; }
};
/// RecordMatcher - Save the current node in the operand list.
class RecordMatcher : public Matcher {
/// WhatFor - This is a string indicating why we're recording this. This
/// should only be used for comment generation not anything semantic.
std::string WhatFor;
/// ResultNo - The slot number in the RecordedNodes vector that this will be,
/// just printed as a comment.
unsigned ResultNo;
public:
RecordMatcher(const std::string &whatfor, unsigned resultNo)
: Matcher(RecordNode), WhatFor(whatfor), ResultNo(resultNo) {}
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordNode;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return true; }
unsigned getHashImpl() const override { return 0; }
};
/// RecordChildMatcher - Save a numbered child of the current node, or fail
/// the match if it doesn't exist. This is logically equivalent to:
/// MoveChild N + RecordNode + MoveParent.
class RecordChildMatcher : public Matcher {
unsigned ChildNo;
/// WhatFor - This is a string indicating why we're recording this. This
/// should only be used for comment generation not anything semantic.
std::string WhatFor;
/// ResultNo - The slot number in the RecordedNodes vector that this will be,
/// just printed as a comment.
unsigned ResultNo;
public:
RecordChildMatcher(unsigned childno, const std::string &whatfor,
unsigned resultNo)
: Matcher(RecordChild), ChildNo(childno), WhatFor(whatfor),
ResultNo(resultNo) {}
unsigned getChildNo() const { return ChildNo; }
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordChild;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<RecordChildMatcher>(M)->getChildNo() == getChildNo();
}
unsigned getHashImpl() const override { return getChildNo(); }
};
/// RecordMemRefMatcher - Save the current node's memref.
class RecordMemRefMatcher : public Matcher {
public:
RecordMemRefMatcher() : Matcher(RecordMemRef) {}
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordMemRef;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return true; }
unsigned getHashImpl() const override { return 0; }
};
/// CaptureGlueInputMatcher - If the current record has a glue input, record
/// it so that it is used as an input to the generated code.
class CaptureGlueInputMatcher : public Matcher {
public:
CaptureGlueInputMatcher() : Matcher(CaptureGlueInput) {}
static inline bool classof(const Matcher *N) {
return N->getKind() == CaptureGlueInput;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return true; }
unsigned getHashImpl() const override { return 0; }
};
/// MoveChildMatcher - This tells the interpreter to move into the
/// specified child node.
class MoveChildMatcher : public Matcher {
unsigned ChildNo;
public:
MoveChildMatcher(unsigned childNo) : Matcher(MoveChild), ChildNo(childNo) {}
unsigned getChildNo() const { return ChildNo; }
static inline bool classof(const Matcher *N) {
return N->getKind() == MoveChild;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<MoveChildMatcher>(M)->getChildNo() == getChildNo();
}
unsigned getHashImpl() const override { return getChildNo(); }
};
/// MoveParentMatcher - This tells the interpreter to move to the parent
/// of the current node.
class MoveParentMatcher : public Matcher {
public:
MoveParentMatcher() : Matcher(MoveParent) {}
static inline bool classof(const Matcher *N) {
return N->getKind() == MoveParent;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return true; }
unsigned getHashImpl() const override { return 0; }
};
/// CheckSameMatcher - This checks to see if this node is exactly the same
/// node as the specified match that was recorded with 'Record'. This is used
/// when patterns have the same name in them, like '(mul GPR:$in, GPR:$in)'.
class CheckSameMatcher : public Matcher {
unsigned MatchNumber;
public:
CheckSameMatcher(unsigned matchnumber)
: Matcher(CheckSame), MatchNumber(matchnumber) {}
unsigned getMatchNumber() const { return MatchNumber; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckSame;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckSameMatcher>(M)->getMatchNumber() == getMatchNumber();
}
unsigned getHashImpl() const override { return getMatchNumber(); }
};
/// CheckChildSameMatcher - This checks to see if child node is exactly the same
/// node as the specified match that was recorded with 'Record'. This is used
/// when patterns have the same name in them, like '(mul GPR:$in, GPR:$in)'.
class CheckChildSameMatcher : public Matcher {
unsigned ChildNo;
unsigned MatchNumber;
public:
CheckChildSameMatcher(unsigned childno, unsigned matchnumber)
: Matcher(CheckChildSame), ChildNo(childno), MatchNumber(matchnumber) {}
unsigned getChildNo() const { return ChildNo; }
unsigned getMatchNumber() const { return MatchNumber; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckChildSame;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckChildSameMatcher>(M)->ChildNo == ChildNo &&
cast<CheckChildSameMatcher>(M)->MatchNumber == MatchNumber;
}
unsigned getHashImpl() const override { return (MatchNumber << 2) | ChildNo; }
};
/// CheckPatternPredicateMatcher - This checks the target-specific predicate
/// to see if the entire pattern is capable of matching. This predicate does
/// not take a node as input. This is used for subtarget feature checks etc.
class CheckPatternPredicateMatcher : public Matcher {
std::string Predicate;
public:
CheckPatternPredicateMatcher(StringRef predicate)
: Matcher(CheckPatternPredicate), Predicate(predicate) {}
StringRef getPredicate() const { return Predicate; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckPatternPredicate;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckPatternPredicateMatcher>(M)->getPredicate() == Predicate;
}
unsigned getHashImpl() const override;
};
/// CheckPredicateMatcher - This checks the target-specific predicate to
/// see if the node is acceptable.
class CheckPredicateMatcher : public Matcher {
TreePattern *Pred;
public:
CheckPredicateMatcher(const TreePredicateFn &pred);
TreePredicateFn getPredicate() const;
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckPredicate;
}
// TODO: Ok?
//virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckPredicateMatcher>(M)->Pred == Pred;
}
unsigned getHashImpl() const override;
};
/// CheckOpcodeMatcher - This checks to see if the current node has the
/// specified opcode, if not it fails to match.
class CheckOpcodeMatcher : public Matcher {
const SDNodeInfo &Opcode;
public:
CheckOpcodeMatcher(const SDNodeInfo &opcode)
: Matcher(CheckOpcode), Opcode(opcode) {}
const SDNodeInfo &getOpcode() const { return Opcode; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckOpcode;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override;
unsigned getHashImpl() const override;
bool isContradictoryImpl(const Matcher *M) const override;
};
/// SwitchOpcodeMatcher - Switch based on the current node's opcode, dispatching
/// to one matcher per opcode. If the opcode doesn't match any of the cases,
/// then the match fails. This is semantically equivalent to a Scope node where
/// every child does a CheckOpcode, but is much faster.
class SwitchOpcodeMatcher : public Matcher {
SmallVector<std::pair<const SDNodeInfo*, Matcher*>, 8> Cases;
public:
SwitchOpcodeMatcher(ArrayRef<std::pair<const SDNodeInfo*, Matcher*> > cases)
: Matcher(SwitchOpcode), Cases(cases.begin(), cases.end()) {}
~SwitchOpcodeMatcher() override;
static inline bool classof(const Matcher *N) {
return N->getKind() == SwitchOpcode;
}
unsigned getNumCases() const { return Cases.size(); }
const SDNodeInfo &getCaseOpcode(unsigned i) const { return *Cases[i].first; }
Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return false; }
unsigned getHashImpl() const override { return 4123; }
};
/// CheckTypeMatcher - This checks to see if the current node has the
/// specified type at the specified result, if not it fails to match.
class CheckTypeMatcher : public Matcher {
MVT::SimpleValueType Type;
unsigned ResNo;
public:
CheckTypeMatcher(MVT::SimpleValueType type, unsigned resno)
: Matcher(CheckType), Type(type), ResNo(resno) {}
MVT::SimpleValueType getType() const { return Type; }
unsigned getResNo() const { return ResNo; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckType;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckTypeMatcher>(M)->Type == Type;
}
unsigned getHashImpl() const override { return Type; }
bool isContradictoryImpl(const Matcher *M) const override;
};
/// SwitchTypeMatcher - Switch based on the current node's type, dispatching
/// to one matcher per case. If the type doesn't match any of the cases,
/// then the match fails. This is semantically equivalent to a Scope node where
/// every child does a CheckType, but is much faster.
class SwitchTypeMatcher : public Matcher {
SmallVector<std::pair<MVT::SimpleValueType, Matcher*>, 8> Cases;
public:
SwitchTypeMatcher(ArrayRef<std::pair<MVT::SimpleValueType, Matcher*> > cases)
: Matcher(SwitchType), Cases(cases.begin(), cases.end()) {}
~SwitchTypeMatcher() override;
static inline bool classof(const Matcher *N) {
return N->getKind() == SwitchType;
}
unsigned getNumCases() const { return Cases.size(); }
MVT::SimpleValueType getCaseType(unsigned i) const { return Cases[i].first; }
Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return false; }
unsigned getHashImpl() const override { return 4123; }
};
/// CheckChildTypeMatcher - This checks to see if a child node has the
/// specified type, if not it fails to match.
class CheckChildTypeMatcher : public Matcher {
unsigned ChildNo;
MVT::SimpleValueType Type;
public:
CheckChildTypeMatcher(unsigned childno, MVT::SimpleValueType type)
: Matcher(CheckChildType), ChildNo(childno), Type(type) {}
unsigned getChildNo() const { return ChildNo; }
MVT::SimpleValueType getType() const { return Type; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckChildType;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckChildTypeMatcher>(M)->ChildNo == ChildNo &&
cast<CheckChildTypeMatcher>(M)->Type == Type;
}
unsigned getHashImpl() const override { return (Type << 3) | ChildNo; }
bool isContradictoryImpl(const Matcher *M) const override;
};
/// CheckIntegerMatcher - This checks to see if the current node is a
/// ConstantSDNode with the specified integer value, if not it fails to match.
class CheckIntegerMatcher : public Matcher {
int64_t Value;
public:
CheckIntegerMatcher(int64_t value)
: Matcher(CheckInteger), Value(value) {}
int64_t getValue() const { return Value; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckInteger;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckIntegerMatcher>(M)->Value == Value;
}
unsigned getHashImpl() const override { return Value; }
bool isContradictoryImpl(const Matcher *M) const override;
};
/// CheckChildIntegerMatcher - This checks to see if the child node is a
/// ConstantSDNode with a specified integer value, if not it fails to match.
class CheckChildIntegerMatcher : public Matcher {
unsigned ChildNo;
int64_t Value;
public:
CheckChildIntegerMatcher(unsigned childno, int64_t value)
: Matcher(CheckChildInteger), ChildNo(childno), Value(value) {}
unsigned getChildNo() const { return ChildNo; }
int64_t getValue() const { return Value; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckChildInteger;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckChildIntegerMatcher>(M)->ChildNo == ChildNo &&
cast<CheckChildIntegerMatcher>(M)->Value == Value;
}
unsigned getHashImpl() const override { return (Value << 3) | ChildNo; }
bool isContradictoryImpl(const Matcher *M) const override;
};
/// CheckCondCodeMatcher - This checks to see if the current node is a
/// CondCodeSDNode with the specified condition, if not it fails to match.
class CheckCondCodeMatcher : public Matcher {
StringRef CondCodeName;
public:
CheckCondCodeMatcher(StringRef condcodename)
: Matcher(CheckCondCode), CondCodeName(condcodename) {}
StringRef getCondCodeName() const { return CondCodeName; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckCondCode;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckCondCodeMatcher>(M)->CondCodeName == CondCodeName;
}
unsigned getHashImpl() const override;
};
/// CheckValueTypeMatcher - This checks to see if the current node is a
/// VTSDNode with the specified type, if not it fails to match.
class CheckValueTypeMatcher : public Matcher {
StringRef TypeName;
public:
CheckValueTypeMatcher(StringRef type_name)
: Matcher(CheckValueType), TypeName(type_name) {}
StringRef getTypeName() const { return TypeName; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckValueType;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckValueTypeMatcher>(M)->TypeName == TypeName;
}
unsigned getHashImpl() const override;
bool isContradictoryImpl(const Matcher *M) const override;
};
/// CheckComplexPatMatcher - This node runs the specified ComplexPattern on
/// the current node.
class CheckComplexPatMatcher : public Matcher {
const ComplexPattern &Pattern;
/// MatchNumber - This is the recorded nodes slot that contains the node we
/// want to match against.
unsigned MatchNumber;
/// Name - The name of the node we're matching, for comment emission.
std::string Name;
/// FirstResult - This is the first slot in the RecordedNodes list that the
/// result of the match populates.
unsigned FirstResult;
public:
CheckComplexPatMatcher(const ComplexPattern &pattern, unsigned matchnumber,
const std::string &name, unsigned firstresult)
: Matcher(CheckComplexPat), Pattern(pattern), MatchNumber(matchnumber),
Name(name), FirstResult(firstresult) {}
const ComplexPattern &getPattern() const { return Pattern; }
unsigned getMatchNumber() const { return MatchNumber; }
const std::string getName() const { return Name; }
unsigned getFirstResult() const { return FirstResult; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckComplexPat;
}
// Not safe to move a pattern predicate past a complex pattern.
bool isSafeToReorderWithPatternPredicate() const override { return false; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return &cast<CheckComplexPatMatcher>(M)->Pattern == &Pattern &&
cast<CheckComplexPatMatcher>(M)->MatchNumber == MatchNumber;
}
unsigned getHashImpl() const override {
return (unsigned)(intptr_t)&Pattern ^ MatchNumber;
}
};
/// CheckAndImmMatcher - This checks to see if the current node is an 'and'
/// with something equivalent to the specified immediate.
class CheckAndImmMatcher : public Matcher {
int64_t Value;
public:
CheckAndImmMatcher(int64_t value)
: Matcher(CheckAndImm), Value(value) {}
int64_t getValue() const { return Value; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckAndImm;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckAndImmMatcher>(M)->Value == Value;
}
unsigned getHashImpl() const override { return Value; }
};
/// CheckOrImmMatcher - This checks to see if the current node is an 'or'
/// with something equivalent to the specified immediate.
class CheckOrImmMatcher : public Matcher {
int64_t Value;
public:
CheckOrImmMatcher(int64_t value)
: Matcher(CheckOrImm), Value(value) {}
int64_t getValue() const { return Value; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckOrImm;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CheckOrImmMatcher>(M)->Value == Value;
}
unsigned getHashImpl() const override { return Value; }
};
/// CheckFoldableChainNodeMatcher - This checks to see if the current node
/// (which defines a chain operand) is safe to fold into a larger pattern.
class CheckFoldableChainNodeMatcher : public Matcher {
public:
CheckFoldableChainNodeMatcher()
: Matcher(CheckFoldableChainNode) {}
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckFoldableChainNode;
}
bool isSafeToReorderWithPatternPredicate() const override { return true; }
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override { return true; }
unsigned getHashImpl() const override { return 0; }
};
/// EmitIntegerMatcher - This creates a new TargetConstant.
class EmitIntegerMatcher : public Matcher {
int64_t Val;
MVT::SimpleValueType VT;
public:
EmitIntegerMatcher(int64_t val, MVT::SimpleValueType vt)
: Matcher(EmitInteger), Val(val), VT(vt) {}
int64_t getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitInteger;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitIntegerMatcher>(M)->Val == Val &&
cast<EmitIntegerMatcher>(M)->VT == VT;
}
unsigned getHashImpl() const override { return (Val << 4) | VT; }
};
/// EmitStringIntegerMatcher - A target constant whose value is represented
/// by a string.
class EmitStringIntegerMatcher : public Matcher {
std::string Val;
MVT::SimpleValueType VT;
public:
EmitStringIntegerMatcher(const std::string &val, MVT::SimpleValueType vt)
: Matcher(EmitStringInteger), Val(val), VT(vt) {}
const std::string &getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitStringInteger;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitStringIntegerMatcher>(M)->Val == Val &&
cast<EmitStringIntegerMatcher>(M)->VT == VT;
}
unsigned getHashImpl() const override;
};
/// EmitRegisterMatcher - This creates a new register node.
class EmitRegisterMatcher : public Matcher {
/// Reg - The def for the register that we're emitting. If this is null, then
/// this is a reference to zero_reg.
const CodeGenRegister *Reg;
MVT::SimpleValueType VT;
public:
EmitRegisterMatcher(const CodeGenRegister *reg, MVT::SimpleValueType vt)
: Matcher(EmitRegister), Reg(reg), VT(vt) {}
const CodeGenRegister *getReg() const { return Reg; }
MVT::SimpleValueType getVT() const { return VT; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitRegister;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitRegisterMatcher>(M)->Reg == Reg &&
cast<EmitRegisterMatcher>(M)->VT == VT;
}
unsigned getHashImpl() const override {
return ((unsigned)(intptr_t)Reg) << 4 | VT;
}
};
/// EmitConvertToTargetMatcher - Emit an operation that reads a specified
/// recorded node and converts it from being a ISD::Constant to
/// ISD::TargetConstant, likewise for ConstantFP.
class EmitConvertToTargetMatcher : public Matcher {
unsigned Slot;
public:
EmitConvertToTargetMatcher(unsigned slot)
: Matcher(EmitConvertToTarget), Slot(slot) {}
unsigned getSlot() const { return Slot; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitConvertToTarget;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitConvertToTargetMatcher>(M)->Slot == Slot;
}
unsigned getHashImpl() const override { return Slot; }
};
/// EmitMergeInputChainsMatcher - Emit a node that merges a list of input
/// chains together with a token factor. The list of nodes is the set of nodes in
/// the matched pattern that have chain inputs/outputs. This node adds all input
/// chains of these nodes if they are not themselves a node in the pattern.
class EmitMergeInputChainsMatcher : public Matcher {
SmallVector<unsigned, 3> ChainNodes;
public:
EmitMergeInputChainsMatcher(ArrayRef<unsigned> nodes)
: Matcher(EmitMergeInputChains), ChainNodes(nodes.begin(), nodes.end()) {}
unsigned getNumNodes() const { return ChainNodes.size(); }
unsigned getNode(unsigned i) const {
assert(i < ChainNodes.size());
return ChainNodes[i];
}
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitMergeInputChains;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitMergeInputChainsMatcher>(M)->ChainNodes == ChainNodes;
}
unsigned getHashImpl() const override;
};
/// EmitCopyToRegMatcher - Emit a CopyToReg node from a value to a physreg,
/// pushing the chain and glue results.
///
class EmitCopyToRegMatcher : public Matcher {
unsigned SrcSlot; // Value to copy into the physreg.
Record *DestPhysReg;
public:
EmitCopyToRegMatcher(unsigned srcSlot, Record *destPhysReg)
: Matcher(EmitCopyToReg), SrcSlot(srcSlot), DestPhysReg(destPhysReg) {}
unsigned getSrcSlot() const { return SrcSlot; }
Record *getDestPhysReg() const { return DestPhysReg; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitCopyToReg;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitCopyToRegMatcher>(M)->SrcSlot == SrcSlot &&
cast<EmitCopyToRegMatcher>(M)->DestPhysReg == DestPhysReg;
}
unsigned getHashImpl() const override {
return SrcSlot ^ ((unsigned)(intptr_t)DestPhysReg << 4);
}
};
/// EmitNodeXFormMatcher - Emit an operation that runs an SDNodeXForm on a
/// recorded node and records the result.
class EmitNodeXFormMatcher : public Matcher {
unsigned Slot;
Record *NodeXForm;
public:
EmitNodeXFormMatcher(unsigned slot, Record *nodeXForm)
: Matcher(EmitNodeXForm), Slot(slot), NodeXForm(nodeXForm) {}
unsigned getSlot() const { return Slot; }
Record *getNodeXForm() const { return NodeXForm; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNodeXForm;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<EmitNodeXFormMatcher>(M)->Slot == Slot &&
cast<EmitNodeXFormMatcher>(M)->NodeXForm == NodeXForm;
}
unsigned getHashImpl() const override {
return Slot ^ ((unsigned)(intptr_t)NodeXForm << 4);
}
};
/// EmitNodeMatcherCommon - Common class shared between EmitNode and
/// MorphNodeTo.
class EmitNodeMatcherCommon : public Matcher {
std::string OpcodeName;
const SmallVector<MVT::SimpleValueType, 3> VTs;
const SmallVector<unsigned, 6> Operands;
bool HasChain, HasInGlue, HasOutGlue, HasMemRefs;
/// NumFixedArityOperands - If this is a fixed arity node, this is set to -1.
/// If this is a variadic node, this is set to the number of fixed arity
/// operands in the root of the pattern. The rest are appended to this node.
int NumFixedArityOperands;
public:
EmitNodeMatcherCommon(const std::string &opcodeName,
ArrayRef<MVT::SimpleValueType> vts,
ArrayRef<unsigned> operands,
bool hasChain, bool hasInGlue, bool hasOutGlue,
bool hasmemrefs,
int numfixedarityoperands, bool isMorphNodeTo)
: Matcher(isMorphNodeTo ? MorphNodeTo : EmitNode), OpcodeName(opcodeName),
VTs(vts.begin(), vts.end()), Operands(operands.begin(), operands.end()),
HasChain(hasChain), HasInGlue(hasInGlue), HasOutGlue(hasOutGlue),
HasMemRefs(hasmemrefs), NumFixedArityOperands(numfixedarityoperands) {}
const std::string &getOpcodeName() const { return OpcodeName; }
unsigned getNumVTs() const { return VTs.size(); }
MVT::SimpleValueType getVT(unsigned i) const {
assert(i < VTs.size());
return VTs[i];
}
unsigned getNumOperands() const { return Operands.size(); }
unsigned getOperand(unsigned i) const {
assert(i < Operands.size());
return Operands[i];
}
const SmallVectorImpl<MVT::SimpleValueType> &getVTList() const { return VTs; }
const SmallVectorImpl<unsigned> &getOperandList() const { return Operands; }
bool hasChain() const { return HasChain; }
bool hasInFlag() const { return HasInGlue; }
bool hasOutFlag() const { return HasOutGlue; }
bool hasMemRefs() const { return HasMemRefs; }
int getNumFixedArityOperands() const { return NumFixedArityOperands; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNode || N->getKind() == MorphNodeTo;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override;
unsigned getHashImpl() const override;
};
/// EmitNodeMatcher - This signals a successful match and generates a node.
class EmitNodeMatcher : public EmitNodeMatcherCommon {
void anchor() override;
unsigned FirstResultSlot;
public:
EmitNodeMatcher(const std::string &opcodeName,
ArrayRef<MVT::SimpleValueType> vts,
ArrayRef<unsigned> operands,
bool hasChain, bool hasInFlag, bool hasOutFlag,
bool hasmemrefs,
int numfixedarityoperands, unsigned firstresultslot)
: EmitNodeMatcherCommon(opcodeName, vts, operands, hasChain,
hasInFlag, hasOutFlag, hasmemrefs,
numfixedarityoperands, false),
FirstResultSlot(firstresultslot) {}
unsigned getFirstResultSlot() const { return FirstResultSlot; }
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNode;
}
};
class MorphNodeToMatcher : public EmitNodeMatcherCommon {
void anchor() override;
const PatternToMatch &Pattern;
public:
MorphNodeToMatcher(const std::string &opcodeName,
ArrayRef<MVT::SimpleValueType> vts,
ArrayRef<unsigned> operands,
bool hasChain, bool hasInFlag, bool hasOutFlag,
bool hasmemrefs,
int numfixedarityoperands, const PatternToMatch &pattern)
: EmitNodeMatcherCommon(opcodeName, vts, operands, hasChain,
hasInFlag, hasOutFlag, hasmemrefs,
numfixedarityoperands, true),
Pattern(pattern) {
}
const PatternToMatch &getPattern() const { return Pattern; }
static inline bool classof(const Matcher *N) {
return N->getKind() == MorphNodeTo;
}
};
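// A minimal sketch (hypothetical opcode name and slot numbers) of how a
// matcher-table emitter could construct an EmitNodeMatcher for a two-operand,
// single-result instruction whose operands were recorded in slots 0 and 1:
//
//   unsigned Ops[] = {0, 1};                  // recorded operand slots
//   MVT::SimpleValueType VTs[] = {MVT::i32};  // one i32 result
//   Matcher *M = new EmitNodeMatcher("TARGET::ADDrr", VTs, Ops,
//                                    /*hasChain=*/false, /*hasInFlag=*/false,
//                                    /*hasOutFlag=*/false, /*hasmemrefs=*/false,
//                                    /*numfixedarityoperands=*/-1,
//                                    /*firstresultslot=*/2);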
/// MarkGlueResultsMatcher - This node indicates which non-root nodes in the
/// pattern produce glue. This allows CompleteMatchMatcher to update them
/// with the output glue of the resultant code.
class MarkGlueResultsMatcher : public Matcher {
SmallVector<unsigned, 3> GlueResultNodes;
public:
MarkGlueResultsMatcher(ArrayRef<unsigned> nodes)
: Matcher(MarkGlueResults), GlueResultNodes(nodes.begin(), nodes.end()) {}
unsigned getNumNodes() const { return GlueResultNodes.size(); }
unsigned getNode(unsigned i) const {
assert(i < GlueResultNodes.size());
return GlueResultNodes[i];
}
static inline bool classof(const Matcher *N) {
return N->getKind() == MarkGlueResults;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<MarkGlueResultsMatcher>(M)->GlueResultNodes == GlueResultNodes;
}
unsigned getHashImpl() const override;
};
/// CompleteMatchMatcher - Complete a match by replacing the results of the
/// pattern with the newly generated nodes. This also prints a comment
/// indicating the source and dest patterns.
class CompleteMatchMatcher : public Matcher {
SmallVector<unsigned, 2> Results;
const PatternToMatch &Pattern;
public:
CompleteMatchMatcher(ArrayRef<unsigned> results,
const PatternToMatch &pattern)
: Matcher(CompleteMatch), Results(results.begin(), results.end()),
Pattern(pattern) {}
unsigned getNumResults() const { return Results.size(); }
unsigned getResult(unsigned R) const { return Results[R]; }
const PatternToMatch &getPattern() const { return Pattern; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CompleteMatch;
}
private:
void printImpl(raw_ostream &OS, unsigned indent) const override;
bool isEqualImpl(const Matcher *M) const override {
return cast<CompleteMatchMatcher>(M)->Results == Results &&
&cast<CompleteMatchMatcher>(M)->Pattern == &Pattern;
}
unsigned getHashImpl() const override;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86RecognizableInstr.h | //===- X86RecognizableInstr.h - Disassembler instruction spec ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
#define LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
#include "CodeGenTarget.h"
#include "X86DisassemblerTables.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/TableGen/Record.h"
namespace llvm {
namespace X86Disassembler {
/// RecognizableInstr - Encapsulates all information required to decode a single
/// instruction, as extracted from the LLVM instruction tables. Has methods
/// to interpret the information available in the LLVM tables, and to emit the
/// instruction into DisassemblerTables.
class RecognizableInstr {
private:
/// The opcode of the instruction, as used in an MCInst
InstrUID UID;
/// The record from the .td files corresponding to this instruction
const Record* Rec;
/// The OpPrefix field from the record
uint8_t OpPrefix;
/// The OpMap field from the record
uint8_t OpMap;
/// The opcode field from the record; this is the opcode used in the Intel
/// encoding and therefore distinct from the UID
uint8_t Opcode;
/// The form field from the record
uint8_t Form;
// The encoding field from the record
uint8_t Encoding;
/// The OpSize field from the record
uint8_t OpSize;
/// The AdSize field from the record
uint8_t AdSize;
/// The hasREX_WPrefix field from the record
bool HasREX_WPrefix;
/// The hasVEX_4V field from the record
bool HasVEX_4V;
/// The hasVEX_4VOp3 field from the record
bool HasVEX_4VOp3;
/// The hasVEX_WPrefix field from the record
bool HasVEX_WPrefix;
/// Inferred from the operands; indicates whether the L bit in the VEX prefix is set
bool HasVEX_LPrefix;
/// The hasMemOp4Prefix field from the record
bool HasMemOp4Prefix;
/// The ignoreVEX_L field from the record
bool IgnoresVEX_L;
/// The hasEVEX_L2Prefix field from the record
bool HasEVEX_L2Prefix;
/// The hasEVEX_K field from the record
bool HasEVEX_K;
/// The hasEVEX_KZ field from the record
bool HasEVEX_KZ;
/// The hasEVEX_B field from the record
bool HasEVEX_B;
/// The isCodeGenOnly field from the record
bool IsCodeGenOnly;
/// The ForceDisassemble field from the record
bool ForceDisassemble;
// The CD8_Scale field from the record
uint8_t CD8_Scale;
// Whether the instruction has the predicate "In64BitMode"
bool Is64Bit;
// Whether the instruction has the predicate "In32BitMode"
bool Is32Bit;
/// The instruction name as listed in the tables
std::string Name;
/// The AT&T AsmString for the instruction
std::string AsmString;
/// Indicates whether the instruction should be emitted into the decode
/// tables; regardless, it will be emitted into the instruction info table
bool ShouldBeEmitted;
/// The operands of the instruction, as listed in the CodeGenInstruction.
/// They are not one-to-one with operands listed in the MCInst; for example,
/// memory operands expand to 5 operands in the MCInst
const std::vector<CGIOperandList::OperandInfo>* Operands;
/// The description of the instruction that is emitted into the instruction
/// info table
InstructionSpecifier* Spec;
/// insnContext - Returns the primary context in which the instruction is
/// valid.
///
/// @return - The context in which the instruction is valid.
InstructionContext insnContext() const;
/// typeFromString - Translates an operand type from the string provided in
/// the LLVM tables to an OperandType for use in the operand specifier.
///
/// @param s - The string, as extracted by calling Rec->getName()
/// on a CodeGenInstruction::OperandInfo.
/// @param hasREX_WPrefix - Indicates whether the instruction has a REX.W
/// prefix. If it does, 32-bit register operands stay
/// 32-bit regardless of the operand size.
/// @param OpSize Indicates the operand size of the instruction.
/// If a register operand's size does not match OpSize,
/// the register keeps its own size.
/// @return - The operand's type.
static OperandType typeFromString(const std::string& s,
bool hasREX_WPrefix, uint8_t OpSize);
/// immediateEncodingFromString - Translates an immediate encoding from the
/// string provided in the LLVM tables to an OperandEncoding for use in
/// the operand specifier.
///
/// @param s - See typeFromString().
/// @param OpSize - Indicates whether this is an OpSize16 instruction.
/// If it is not, then 16-bit immediate operands stay 16-bit.
/// @return - The operand's encoding.
static OperandEncoding immediateEncodingFromString(const std::string &s,
uint8_t OpSize);
/// rmRegisterEncodingFromString - Like immediateEncodingFromString, but
/// handles operands that are in the R/M field of the ModR/M byte.
static OperandEncoding rmRegisterEncodingFromString(const std::string &s,
uint8_t OpSize);
/// roRegisterEncodingFromString - Like immediateEncodingFromString, but
/// handles operands that are in the REG field of the ModR/M byte.
static OperandEncoding roRegisterEncodingFromString(const std::string &s,
uint8_t OpSize);
static OperandEncoding memoryEncodingFromString(const std::string &s,
uint8_t OpSize);
static OperandEncoding relocationEncodingFromString(const std::string &s,
uint8_t OpSize);
static OperandEncoding opcodeModifierEncodingFromString(const std::string &s,
uint8_t OpSize);
static OperandEncoding vvvvRegisterEncodingFromString(const std::string &s,
uint8_t OpSize);
static OperandEncoding writemaskRegisterEncodingFromString(const std::string &s,
uint8_t OpSize);
/// \brief Adjust the encoding type for an operand based on the instruction.
void adjustOperandEncoding(OperandEncoding &encoding);
/// handleOperand - Converts a single operand from the LLVM table format to
/// the emitted table format, handling any duplicate operands it encounters
/// and then one non-duplicate.
///
/// @param optional - Determines whether to assert that the
/// operand exists.
/// @param operandIndex - The index into the generated operand table.
/// Incremented by this function one or more
/// times to reflect possible duplicate
/// operands.
/// @param physicalOperandIndex - The index of the current operand into the
/// set of non-duplicate ('physical') operands.
/// Incremented by this function once.
/// @param numPhysicalOperands - The number of non-duplicate operands in the
/// instructions.
/// @param operandMapping - The operand mapping, which has an entry for
/// each operand that indicates whether it is a
/// duplicate, and of what.
void handleOperand(bool optional,
unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned &numPhysicalOperands,
const unsigned *operandMapping,
OperandEncoding (*encodingFromString)
(const std::string&,
uint8_t OpSize));
/// shouldBeEmitted - Returns the shouldBeEmitted field. Although filter()
/// filters out many instructions, at various points in decoding we
/// determine that the instruction should not actually be decodable. In
/// particular, MMX MOV instructions aren't emitted, but they're only
/// identified during operand parsing.
///
/// @return - true if at this point we believe the instruction should be
/// emitted; false if not. This will return false if filter() returns false
/// once emitInstructionSpecifier() has been called.
bool shouldBeEmitted() const {
return ShouldBeEmitted;
}
/// emitInstructionSpecifier - Loads the instruction specifier for the current
/// instruction into a DisassemblerTables.
///
void emitInstructionSpecifier();
/// emitDecodePath - Populates the proper fields in the decode tables
/// corresponding to the decode paths for this instruction.
///
/// \param tables The DisassemblerTables to populate with the decode
/// information for the current instruction.
void emitDecodePath(DisassemblerTables &tables) const;
/// Constructor - Initializes a RecognizableInstr with the appropriate fields
/// from a CodeGenInstruction.
///
/// \param tables The DisassemblerTables that the specifier will be added to.
/// \param insn The CodeGenInstruction to extract information from.
/// \param uid The unique ID of the current instruction.
RecognizableInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid);
public:
/// processInstr - Accepts a CodeGenInstruction and loads decode information
/// for it into a DisassemblerTables if appropriate.
///
/// \param tables The DisassemblerTables to be populated with decode
/// information.
/// \param insn The CodeGenInstruction to be used as a source for this
/// information.
/// \param uid The unique ID of the instruction.
static void processInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid);
};
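// A sketch (not part of this header) of how an emitter is expected to drive
// processInstr over the target's instruction list; the variable names and the
// numbering scheme are illustrative:
//
//   DisassemblerTables Tables;
//   ArrayRef<const CodeGenInstruction *> Insts =
//       Target.getInstructionsByEnumValue();
//   for (unsigned i = 0, e = Insts.size(); i != e; ++i)
//     RecognizableInstr::processInstr(Tables, *Insts[i], i);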
} // namespace X86Disassembler
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86DisassemblerTables.h | //===- X86DisassemblerTables.h - Disassembler tables ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains the interface of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_X86DISASSEMBLERTABLES_H
#define LLVM_UTILS_TABLEGEN_X86DISASSEMBLERTABLES_H
#include "X86DisassemblerShared.h"
#include "X86ModRMFilters.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <vector>
namespace llvm {
namespace X86Disassembler {
/// DisassemblerTables - Encapsulates all the decode tables being generated by
/// the table emitter. Contains functions to populate the tables as well as
/// to emit them as hierarchical C structures suitable for consumption by the
/// runtime.
class DisassemblerTables {
private:
/// The decoder tables. There is one for each opcode type:
/// [0] one-byte opcodes
/// [1] two-byte opcodes of the form 0f __
/// [2] three-byte opcodes of the form 0f 38 __
/// [3] three-byte opcodes of the form 0f 3a __
/// [4] XOP8 map opcode
/// [5] XOP9 map opcode
/// [6] XOPA map opcode
ContextDecision* Tables[7];
// Table of ModRM encodings.
typedef std::map<std::vector<unsigned>, unsigned> ModRMMapTy;
mutable ModRMMapTy ModRMTable;
/// The instruction information table
std::vector<InstructionSpecifier> InstructionSpecifiers;
/// True if there are primary decode conflicts in the instruction set
bool HasConflicts;
/// emitModRMDecision - Emits a table of entries corresponding to a single
/// ModR/M decision. Compacts the ModR/M decision if possible. ModR/M
/// decisions are printed as:
///
/// { /* struct ModRMDecision */
/// TYPE,
/// modRMTablennnn
/// }
///
/// where nnnn is a unique ID for the corresponding table of IDs.
/// TYPE indicates whether the table has one entry that is the same
/// regardless of the ModR/M byte, two entries (one for bytes 0x00-0xbf and one
/// for bytes 0xc0-0xff), or 256 entries, one for each possible byte.
/// nnnn is the number of a table for looking up these values. The tables
/// are written separately so that tables consisting entirely of zeros will
/// not be duplicated. (These all have the name modRMEmptyTable.) A table
/// is printed as:
///
/// InstrUID modRMTablennnn[k] = {
/// nnnn, /* MNEMONIC */
/// ...
/// nnnn /* MNEMONIC */
/// };
///
/// @param o1 - The output stream to print the ID table to.
/// @param o2 - The output stream to print the decision structure to.
/// @param i1 - The indentation level to use with stream o1.
/// @param i2 - The indentation level to use with stream o2.
/// @param ModRMTableNum - next table number for adding to ModRMTable.
/// @param decision - The ModR/M decision to emit. This decision has 256
/// entries - emitModRMDecision decides how to compact it.
void emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
ModRMDecision &decision) const;
/// emitOpcodeDecision - Emits an OpcodeDecision and all its subsidiary ModR/M
/// decisions. An OpcodeDecision is printed as:
///
/// { /* struct OpcodeDecision */
/// /* 0x00 */
/// { /* struct ModRMDecision */
/// ...
/// }
/// ...
/// }
///
/// where the ModRMDecision structure is printed as described in the
/// documentation for emitModRMDecision(). emitOpcodeDecision() passes on a
/// stream and indent level for the UID tables generated by
/// emitModRMDecision(), but does not use them itself.
///
/// @param o1 - The output stream to print the ID tables generated by
/// emitModRMDecision() to.
/// @param o2 - The output stream for the decision structure itself.
/// @param i1 - The indent level to use with stream o1.
/// @param i2 - The indent level to use with stream o2.
/// @param ModRMTableNum - next table number for adding to ModRMTable.
/// @param decision - The OpcodeDecision to emit along with its subsidiary
/// structures.
void emitOpcodeDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
OpcodeDecision &decision) const;
/// emitContextDecision - Emits a ContextDecision and all its subsidiary
/// Opcode and ModRMDecisions. A ContextDecision is printed as:
///
/// struct ContextDecision NAME = {
/// { /* OpcodeDecisions */
/// /* IC */
/// { /* struct OpcodeDecision */
/// ...
/// },
/// ...
/// }
/// }
///
/// NAME is the name of the ContextDecision (typically one of the four names
/// ONEBYTE_SYM, TWOBYTE_SYM, THREEBYTE38_SYM, THREEBYTE3A_SYM from
/// X86DisassemblerDecoderCommon.h).
/// IC is one of the contexts in InstructionContext. There is an opcode
/// decision for each possible context.
/// The OpcodeDecision structures are printed as described in the
/// documentation for emitOpcodeDecision.
///
/// @param o1 - The output stream to print the ID tables generated by
/// emitModRMDecision() to.
/// @param o2 - The output stream to print the decision structure to.
/// @param i1 - The indent level to use with stream o1.
/// @param i2 - The indent level to use with stream o2.
/// @param ModRMTableNum - next table number for adding to ModRMTable.
/// @param decision - The ContextDecision to emit along with its subsidiary
/// structures.
/// @param name - The name for the ContextDecision.
void emitContextDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
ContextDecision &decision, const char* name) const;
/// emitInstructionInfo - Prints the instruction specifier table, which has
/// one entry for each instruction, and contains name and operand
/// information. This table is printed as:
///
/// struct InstructionSpecifier CONTEXTS_SYM[k] = {
/// {
/// /* nnnn */
/// "MNEMONIC",
/// 0xnn,
/// {
/// {
/// ENCODING,
/// TYPE
/// },
/// ...
/// }
/// },
/// };
///
/// k is the total number of instructions.
/// nnnn is the ID of the current instruction (0-based). This table
/// includes entries for non-instructions like PHINODE.
/// 0xnn is the lowest possible opcode for the current instruction, used for
/// AddRegFrm instructions to compute the operand's value.
/// ENCODING and TYPE describe the encoding and type for a single operand.
///
/// @param o - The output stream to which the instruction table should be
/// written.
/// @param i - The indent level for use with the stream.
void emitInstructionInfo(raw_ostream &o, unsigned &i) const;
/// emitContextTable - Prints the table that is used to translate from an
/// instruction attribute mask to an instruction context. This table is
/// printed as:
///
/// InstructionContext CONTEXTS_STR[256] = {
/// IC, /* 0x00 */
/// ...
/// };
///
/// IC is the context corresponding to the mask 0x00, and there are 256
/// possible masks.
///
/// @param o - The output stream to which the context table should be written.
/// @param i - The indent level for use with the stream.
void emitContextTable(raw_ostream &o, uint32_t &i) const;
/// emitContextDecisions - Prints all four ContextDecision structures using
/// emitContextDecision().
///
/// @param o1 - The output stream to print the ID tables generated by
/// emitModRMDecision() to.
/// @param o2 - The output stream to print the decision structures to.
/// @param i1 - The indent level to use with stream o1.
/// @param i2 - The indent level to use with stream o2.
/// @param ModRMTableNum - next table number for adding to ModRMTable.
void emitContextDecisions(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2,
unsigned &ModRMTableNum) const;
/// setTableFields - Uses a ModRMFilter to set the appropriate entries in a
/// ModRMDecision to refer to a particular instruction ID.
///
/// @param decision - The ModRMDecision to populate.
/// @param filter - The filter to use in deciding which entries to populate.
/// @param uid - The unique ID to set matching entries to.
/// @param opcode - The opcode of the instruction, for error reporting.
void setTableFields(ModRMDecision &decision,
const ModRMFilter &filter,
InstrUID uid,
uint8_t opcode);
public:
/// Constructor - Allocates space for the class decisions and clears them.
DisassemblerTables();
~DisassemblerTables();
/// emit - Emits the instruction table, context table, and class decisions.
///
/// @param o - The output stream to print the tables to.
void emit(raw_ostream &o) const;
/// setTableFields - Uses the opcode type, instruction context, opcode, and a
/// ModRMFilter as criteria to set a particular set of entries in the
/// decode tables to point to a specific uid.
///
/// @param type - The opcode type (ONEBYTE, TWOBYTE, etc.)
/// @param insnContext - The context to use (IC, IC_64BIT, etc.)
/// @param opcode - The last byte of the opcode (not counting any escape
/// or extended opcodes).
/// @param filter - The ModRMFilter that decides which ModR/M byte values
/// correspond to the desired instruction.
/// @param uid - The unique ID of the instruction.
/// @param is32bit - Instruction is only valid in 32-bit mode
/// @param ignoresVEX_L - Instruction ignores VEX.L
/// @param AddrSize - The instruction's address size (16/32/64); 0 if unspecified
void setTableFields(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
const ModRMFilter &filter,
InstrUID uid,
bool is32bit,
bool ignoresVEX_L,
unsigned AddrSize);
/// specForUID - Returns the instruction specifier for a given unique
/// instruction ID. Used when resolving collisions.
///
/// @param uid - The unique ID of the instruction.
/// @return - A reference to the instruction specifier.
InstructionSpecifier& specForUID(InstrUID uid) {
if (uid >= InstructionSpecifiers.size())
InstructionSpecifiers.resize(uid + 1);
return InstructionSpecifiers[uid];
}
// hasConflicts - Reports whether there were primary decode conflicts
// from any instructions added to the tables.
// @return - true if there were; false otherwise.
bool hasConflicts() {
return HasConflicts;
}
};
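// Typical use once every recognizable instruction has been added (a sketch;
// the surrounding emitter and output-stream handling are omitted):
//
//   DisassemblerTables Tables;
//   // ... setTableFields() called once per decodable instruction ...
//   if (Tables.hasConflicts())
//     PrintWarning("primary decode conflicts detected");
//   Tables.emit(OS);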
} // namespace X86Disassembler
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/SubtargetEmitter.cpp | //===- SubtargetEmitter.cpp - Generate subtarget enumerations -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits subtarget enumerations.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "CodeGenSchedule.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <map>
#include <string>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "subtarget-emitter"
namespace {
class SubtargetEmitter {
// Each processor has a SchedClassDesc table with an entry for each SchedClass.
// The SchedClassDesc table indexes into a global write resource table, write
// latency table, and read advance table.
struct SchedClassTables {
std::vector<std::vector<MCSchedClassDesc> > ProcSchedClasses;
std::vector<MCWriteProcResEntry> WriteProcResources;
std::vector<MCWriteLatencyEntry> WriteLatencies;
std::vector<std::string> WriterNames;
std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
// Reserve an invalid entry at index 0
SchedClassTables() {
ProcSchedClasses.resize(1);
WriteProcResources.resize(1);
WriteLatencies.resize(1);
WriterNames.push_back("InvalidWrite");
ReadAdvanceEntries.resize(1);
}
};
struct LessWriteProcResources {
bool operator()(const MCWriteProcResEntry &LHS,
const MCWriteProcResEntry &RHS) {
return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
}
};
RecordKeeper &Records;
CodeGenSchedModels &SchedModels;
std::string Target;
void Enumeration(raw_ostream &OS, const char *ClassName);
unsigned FeatureKeyValues(raw_ostream &OS);
unsigned CPUKeyValues(raw_ostream &OS);
void FormItineraryStageString(const std::string &Names,
Record *ItinData, std::string &ItinString,
unsigned &NStages);
void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
unsigned &NOperandCycles);
void FormItineraryBypassString(const std::string &Names,
Record *ItinData,
std::string &ItinString, unsigned NOperandCycles);
void EmitStageAndOperandCycleData(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> >
&ProcItinLists);
void EmitItineraries(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> >
&ProcItinLists);
void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name,
char Separator);
void EmitProcessorResources(const CodeGenProcModel &ProcModel,
raw_ostream &OS);
Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
const CodeGenProcModel &ProcModel);
Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
const CodeGenProcModel &ProcModel);
void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &Cycles,
const CodeGenProcModel &ProcModel);
void GenSchedClassTables(const CodeGenProcModel &ProcModel,
SchedClassTables &SchedTables);
void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
void EmitProcessorModels(raw_ostream &OS);
void EmitProcessorLookup(raw_ostream &OS);
void EmitSchedModelHelpers(std::string ClassName, raw_ostream &OS);
void EmitSchedModel(raw_ostream &OS);
void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
unsigned NumProcs);
public:
SubtargetEmitter(RecordKeeper &R, CodeGenTarget &TGT):
Records(R), SchedModels(TGT.getSchedModels()), Target(TGT.getName()) {}
void run(raw_ostream &o);
};
} // End anonymous namespace
//
// Enumeration - Emit the specified class as an enumeration.
//
void SubtargetEmitter::Enumeration(raw_ostream &OS,
const char *ClassName) {
// Get all records of class and sort
std::vector<Record*> DefList = Records.getAllDerivedDefinitions(ClassName);
std::sort(DefList.begin(), DefList.end(), LessRecord());
unsigned N = DefList.size();
if (N == 0)
return;
if (N > MAX_SUBTARGET_FEATURES)
PrintFatalError("Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");
OS << "namespace " << Target << " {\n";
// Open enumeration. Use a 64-bit underlying type.
OS << "enum : uint64_t {\n";
// For each record
for (unsigned i = 0; i < N;) {
// Next record
Record *Def = DefList[i];
// Get and emit name
OS << " " << Def->getName() << " = " << i;
if (++i < N) OS << ",";
OS << "\n";
}
// Close enumeration and namespace
OS << "};\n}\n";
}
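// For a hypothetical target "Foo" with two SubtargetFeature records FeatureA
// and FeatureB, the emitted enumeration looks roughly like:
//
//   namespace Foo {
//   enum : uint64_t {
//     FeatureA = 0,
//     FeatureB = 1
//   };
//   }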
//
// FeatureKeyValues - Emit data of all the subtarget features. Used by the
// command line.
//
unsigned SubtargetEmitter::FeatureKeyValues(raw_ostream &OS) {
// Gather and sort all the features
std::vector<Record*> FeatureList =
Records.getAllDerivedDefinitions("SubtargetFeature");
if (FeatureList.empty())
return 0;
std::sort(FeatureList.begin(), FeatureList.end(), LessRecordFieldName());
// Begin feature table
OS << "// Sorted (by key) array of values for CPU features.\n"
<< "extern const llvm::SubtargetFeatureKV " << Target
<< "FeatureKV[] = {\n";
// For each feature
unsigned NumFeatures = 0;
for (unsigned i = 0, N = FeatureList.size(); i < N; ++i) {
// Next feature
Record *Feature = FeatureList[i];
const std::string &Name = Feature->getName();
const std::string &CommandLineName = Feature->getValueAsString("Name");
const std::string &Desc = Feature->getValueAsString("Desc");
if (CommandLineName.empty()) continue;
// Emit as { "feature", "description", { featureEnum }, { i1 , i2 , ... , in } }
OS << " { "
<< "\"" << CommandLineName << "\", "
<< "\"" << Desc << "\", "
<< "{ " << Target << "::" << Name << " }, ";
const std::vector<Record*> &ImpliesList =
Feature->getValueAsListOfDefs("Implies");
if (ImpliesList.empty()) {
OS << "{ }";
} else {
OS << "{ ";
for (unsigned j = 0, M = ImpliesList.size(); j < M;) {
OS << Target << "::" << ImpliesList[j]->getName();
if (++j < M) OS << ", ";
}
OS << " }";
}
OS << " }";
++NumFeatures;
// Depending on 'if more in the list' emit comma
if ((i + 1) < N) OS << ",";
OS << "\n";
}
// End feature table
OS << "};\n";
return NumFeatures;
}
//
// CPUKeyValues - Emit data of all the subtarget processors. Used by command
// line.
//
unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
// Gather and sort processor information
std::vector<Record*> ProcessorList =
Records.getAllDerivedDefinitions("Processor");
std::sort(ProcessorList.begin(), ProcessorList.end(), LessRecordFieldName());
// Begin processor table
OS << "// Sorted (by key) array of values for CPU subtype.\n"
<< "extern const llvm::SubtargetFeatureKV " << Target
<< "SubTypeKV[] = {\n";
// For each processor
for (unsigned i = 0, N = ProcessorList.size(); i < N;) {
// Next processor
Record *Processor = ProcessorList[i];
const std::string &Name = Processor->getValueAsString("Name");
const std::vector<Record*> &FeatureList =
Processor->getValueAsListOfDefs("Features");
// Emit as { "cpu", "description", { f1 , f2 , ... fn } },
OS << " { "
<< "\"" << Name << "\", "
<< "\"Select the " << Name << " processor\", ";
if (FeatureList.empty()) {
OS << "{ }";
} else {
OS << "{ ";
for (unsigned j = 0, M = FeatureList.size(); j < M;) {
OS << Target << "::" << FeatureList[j]->getName();
if (++j < M) OS << ", ";
}
OS << " }";
}
// The { } is for the "implies" section of this data structure.
OS << ", { } }";
// Depending on 'if more in the list' emit comma
if (++i < N) OS << ",";
OS << "\n";
}
// End processor table
OS << "};\n";
return ProcessorList.size();
}
//
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
//
void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
Record *ItinData,
std::string &ItinString,
unsigned &NStages) {
// Get states list
const std::vector<Record*> &StageList =
ItinData->getValueAsListOfDefs("Stages");
// For each stage
unsigned N = NStages = StageList.size();
for (unsigned i = 0; i < N;) {
// Next stage
const Record *Stage = StageList[i];
// Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
int Cycles = Stage->getValueAsInt("Cycles");
ItinString += " { " + itostr(Cycles) + ", ";
// Get unit list
const std::vector<Record*> &UnitList = Stage->getValueAsListOfDefs("Units");
// For each unit
for (unsigned j = 0, M = UnitList.size(); j < M;) {
// Add name and bitwise or
ItinString += Name + "FU::" + UnitList[j]->getName();
if (++j < M) ItinString += " | ";
}
int TimeInc = Stage->getValueAsInt("TimeInc");
ItinString += ", " + itostr(TimeInc);
int Kind = Stage->getValueAsInt("Kind");
ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);
// Close off stage
ItinString += " }";
if (++i < N) ItinString += ", ";
}
}
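// For example, an itinerary with two stages on functional units ALU0|ALU1 and
// then LSU, for an itinerary record named "Foo" (all names illustrative, and
// assuming the default TimeInc of -1 and the Required reservation kind),
// produces roughly:
//
//   " { 1, FooFU::ALU0 | FooFU::ALU1, -1, (llvm::InstrStage::ReservationKinds)0 },  { 2, FooFU::LSU, -1, (llvm::InstrStage::ReservationKinds)0 }"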
//
// FormItineraryOperandCycleString - Compose a string containing the
// operand cycle initialization for the specified itinerary. N is the
// number of operands that have cycles specified.
//
void SubtargetEmitter::FormItineraryOperandCycleString(Record *ItinData,
std::string &ItinString, unsigned &NOperandCycles) {
// Get operand cycle list
const std::vector<int64_t> &OperandCycleList =
ItinData->getValueAsListOfInts("OperandCycles");
// For each operand cycle
unsigned N = NOperandCycles = OperandCycleList.size();
for (unsigned i = 0; i < N;) {
// Next operand cycle
const int OCycle = OperandCycleList[i];
ItinString += " " + itostr(OCycle);
if (++i < N) ItinString += ", ";
}
}
void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
Record *ItinData,
std::string &ItinString,
unsigned NOperandCycles) {
const std::vector<Record*> &BypassList =
ItinData->getValueAsListOfDefs("Bypasses");
unsigned N = BypassList.size();
unsigned i = 0;
for (; i < N;) {
ItinString += Name + "Bypass::" + BypassList[i]->getName();
if (++i < NOperandCycles) ItinString += ", ";
}
for (; i < NOperandCycles;) {
ItinString += " 0";
if (++i < NOperandCycles) ItinString += ", ";
}
}
//
// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
// by CodeGenSchedClass::Index.
//
void SubtargetEmitter::
EmitStageAndOperandCycleData(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> >
&ProcItinLists) {
// Multiple processor models may share an itinerary record. Emit it once.
SmallPtrSet<Record*, 8> ItinsDefSet;
// Emit functional units for all the itineraries.
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
if (!ItinsDefSet.insert(PI->ItinsDef).second)
continue;
std::vector<Record*> FUs = PI->ItinsDef->getValueAsListOfDefs("FU");
if (FUs.empty())
continue;
const std::string &Name = PI->ItinsDef->getName();
OS << "\n// Functional units for \"" << Name << "\"\n"
<< "namespace " << Name << "FU {\n";
for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
OS << " const unsigned " << FUs[j]->getName()
<< " = 1 << " << j << ";\n";
OS << "}\n";
std::vector<Record*> BPs = PI->ItinsDef->getValueAsListOfDefs("BP");
if (!BPs.empty()) {
OS << "\n// Pipeline forwarding pathes for itineraries \"" << Name
<< "\"\n" << "namespace " << Name << "Bypass {\n";
OS << " const unsigned NoBypass = 0;\n";
for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
OS << " const unsigned " << BPs[j]->getName()
<< " = 1 << " << j << ";\n";
OS << "}\n";
}
}
// Begin stages table
std::string StageTable = "\nextern const llvm::InstrStage " + Target +
"Stages[] = {\n";
StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";
// Begin operand cycle table
std::string OperandCycleTable = "extern const unsigned " + Target +
"OperandCycles[] = {\n";
OperandCycleTable += " 0, // No itinerary\n";
// Begin pipeline bypass table
std::string BypassTable = "extern const unsigned " + Target +
"ForwardingPaths[] = {\n";
BypassTable += " 0, // No itinerary\n";
// For each Itinerary across all processors, add a unique entry to the stages,
// operand cycles, and pipeline bypass tables. Then add the new Itinerary
// object with computed offsets to the ProcItinLists result.
unsigned StageCount = 1, OperandCycleCount = 1;
std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
const CodeGenProcModel &ProcModel = *PI;
// Add process itinerary to the list.
ProcItinLists.resize(ProcItinLists.size()+1);
// If this processor defines no itineraries, then leave the itinerary list
// empty.
std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
if (!ProcModel.hasItineraries())
continue;
const std::string &Name = ProcModel.ItinsDef->getName();
ItinList.resize(SchedModels.numInstrSchedClasses());
assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");
for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {
// Next itinerary data
Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];
// Get string and stage count
std::string ItinStageString;
unsigned NStages = 0;
if (ItinData)
FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
// Get string and operand cycle count
std::string ItinOperandCycleString;
unsigned NOperandCycles = 0;
std::string ItinBypassString;
if (ItinData) {
FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
NOperandCycles);
FormItineraryBypassString(Name, ItinData, ItinBypassString,
NOperandCycles);
}
// Check to see if stage already exists and create if it doesn't
unsigned FindStage = 0;
if (NStages > 0) {
FindStage = ItinStageMap[ItinStageString];
if (FindStage == 0) {
// Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
StageTable += ItinStageString + ", // " + itostr(StageCount);
if (NStages > 1)
StageTable += "-" + itostr(StageCount + NStages - 1);
StageTable += "\n";
// Record Itin class number.
ItinStageMap[ItinStageString] = FindStage = StageCount;
StageCount += NStages;
}
}
// Check to see if operand cycle already exists and create if it doesn't
unsigned FindOperandCycle = 0;
if (NOperandCycles > 0) {
std::string ItinOperandString = ItinOperandCycleString+ItinBypassString;
FindOperandCycle = ItinOperandMap[ItinOperandString];
if (FindOperandCycle == 0) {
// Emit as cycle, // index
OperandCycleTable += ItinOperandCycleString + ", // ";
std::string OperandIdxComment = itostr(OperandCycleCount);
if (NOperandCycles > 1)
OperandIdxComment += "-"
+ itostr(OperandCycleCount + NOperandCycles - 1);
OperandCycleTable += OperandIdxComment + "\n";
// Record Itin class number.
ItinOperandMap[ItinOperandCycleString] =
FindOperandCycle = OperandCycleCount;
// Emit as bypass, // index
BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
OperandCycleCount += NOperandCycles;
}
}
// Set up itinerary as location and location + stage count
int NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
InstrItinerary Itinerary = { NumUOps, FindStage, FindStage + NStages,
FindOperandCycle,
FindOperandCycle + NOperandCycles};
// Inject - empty slots will be 0, 0
ItinList[SchedClassIdx] = Itinerary;
}
}
// Closing stage
StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
StageTable += "};\n";
// Closing operand cycles
OperandCycleTable += " 0 // End operand cycles\n";
OperandCycleTable += "};\n";
BypassTable += " 0 // End bypass tables\n";
BypassTable += "};\n";
// Emit tables.
OS << StageTable;
OS << OperandCycleTable;
OS << BypassTable;
}
//
// EmitItineraries - Generate data for processor itineraries that were
// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
// Itineraries for each processor. The Itinerary lists are indexed on
// CodeGenSchedClass::Index.
//
void SubtargetEmitter::
EmitItineraries(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> > &ProcItinLists) {
// Multiple processor models may share an itinerary record. Emit it once.
SmallPtrSet<Record*, 8> ItinsDefSet;
// For each processor's machine model
std::vector<std::vector<InstrItinerary> >::iterator
ProcItinListsIter = ProcItinLists.begin();
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
Record *ItinsDef = PI->ItinsDef;
if (!ItinsDefSet.insert(ItinsDef).second)
continue;
// Get processor itinerary name
const std::string &Name = ItinsDef->getName();
// Get the itinerary list for the processor.
assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
// Empty itineraries aren't referenced anywhere in the tablegen output
// so don't emit them.
if (ItinList.empty())
continue;
OS << "\n";
OS << "static const llvm::InstrItinerary ";
// Begin processor itinerary table
OS << Name << "[] = {\n";
// For each itinerary class in CodeGenSchedClass::Index order.
for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
InstrItinerary &Itinerary = ItinList[j];
// Emit Itinerary in the form of
// { numMicroOps, firstStage, lastStage, firstOperandCycle, lastOperandCycle }, // index name
OS << " { " <<
Itinerary.NumMicroOps << ", " <<
Itinerary.FirstStage << ", " <<
Itinerary.LastStage << ", " <<
Itinerary.FirstOperandCycle << ", " <<
Itinerary.LastOperandCycle << " }" <<
", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
}
// End processor itinerary table
OS << " { 0, ~0U, ~0U, ~0U, ~0U } // end marker\n";
OS << "};\n";
}
}
// Emit either the value defined in the TableGen Record, or the default
// value defined in the C++ header. The Record is null if the processor does not
// define a model.
void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
const char *Name, char Separator) {
OS << " ";
int V = R ? R->getValueAsInt(Name) : -1;
if (V >= 0)
OS << V << Separator << " // " << Name;
else
OS << "MCSchedModel::Default" << Name << Separator;
OS << '\n';
}
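// For example, with Name == "IssueWidth" and Separator == ',', a model that
// sets IssueWidth to 4 yields the line
//   4, // IssueWidth
// while a null Record (processor without a model) falls back to
//   MCSchedModel::DefaultIssueWidth,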
void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
raw_ostream &OS) {
char Sep = ProcModel.ProcResourceDefs.empty() ? ' ' : ',';
OS << "\n// {Name, NumUnits, SuperIdx, IsBuffered}\n";
OS << "static const llvm::MCProcResourceDesc "
<< ProcModel.ModelName << "ProcResources" << "[] = {\n"
<< " {DBGFIELD(\"InvalidUnit\") 0, 0, 0}" << Sep << "\n";
for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
Record *PRDef = ProcModel.ProcResourceDefs[i];
Record *SuperDef = nullptr;
unsigned SuperIdx = 0;
unsigned NumUnits = 0;
int BufferSize = PRDef->getValueAsInt("BufferSize");
if (PRDef->isSubClassOf("ProcResGroup")) {
RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
for (RecIter RUI = ResUnits.begin(), RUE = ResUnits.end();
RUI != RUE; ++RUI) {
NumUnits += (*RUI)->getValueAsInt("NumUnits");
}
}
else {
// Find the SuperIdx
if (PRDef->getValueInit("Super")->isComplete()) {
SuperDef = SchedModels.findProcResUnits(
PRDef->getValueAsDef("Super"), ProcModel);
SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
}
NumUnits = PRDef->getValueAsInt("NumUnits");
}
// Emit the ProcResourceDesc
if (i+1 == e)
Sep = ' ';
OS << " {DBGFIELD(\"" << PRDef->getName() << "\") ";
if (PRDef->getName().size() < 15)
OS.indent(15 - PRDef->getName().size());
OS << NumUnits << ", " << SuperIdx << ", "
<< BufferSize << "}" << Sep << " // #" << i+1;
if (SuperDef)
OS << ", Super=" << SuperDef->getName();
OS << "\n";
}
OS << "};\n";
}
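// The emitted table for a hypothetical "FooModel" with a single two-unit ALU
// resource would look roughly like:
//
//   // {Name, NumUnits, SuperIdx, IsBuffered}
//   static const llvm::MCProcResourceDesc FooModelProcResources[] = {
//     {DBGFIELD("InvalidUnit")     0, 0, 0},
//     {DBGFIELD("FooALU")          2, 0, -1}  // #1
//   };
//
// where the last column is the BufferSize taken verbatim from the record.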
// Find the WriteRes Record that defines processor resources for this
// SchedWrite.
Record *SubtargetEmitter::FindWriteResources(
const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {
// Check if the SchedWrite is already subtarget-specific and directly
// specifies a set of processor resources.
if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
return SchedWrite.TheDef;
Record *AliasDef = nullptr;
for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
AI != AE; ++AI) {
const CodeGenSchedRW &AliasRW =
SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
continue;
}
if (AliasDef)
PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
"defined for processor " + ProcModel.ModelName +
" Ensure only one SchedAlias exists per RW.");
AliasDef = AliasRW.TheDef;
}
if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
return AliasDef;
// Check this processor's list of write resources.
Record *ResDef = nullptr;
for (RecIter WRI = ProcModel.WriteResDefs.begin(),
WRE = ProcModel.WriteResDefs.end(); WRI != WRE; ++WRI) {
if (!(*WRI)->isSubClassOf("WriteRes"))
continue;
if (AliasDef == (*WRI)->getValueAsDef("WriteType")
|| SchedWrite.TheDef == (*WRI)->getValueAsDef("WriteType")) {
if (ResDef) {
PrintFatalError((*WRI)->getLoc(), "Resources are defined for both "
"SchedWrite and its alias on processor " +
ProcModel.ModelName);
}
ResDef = *WRI;
}
}
// TODO: If ProcModel has a base model (previous generation processor),
// then call FindWriteResources recursively with that model here.
if (!ResDef) {
PrintFatalError(ProcModel.ModelDef->getLoc(),
std::string("Processor does not define resources for ")
+ SchedWrite.TheDef->getName());
}
return ResDef;
}
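// Illustrative TableGen shape of the records this lookup resolves against
// (names hypothetical):
//
//   def FooWriteIMul : SchedWriteRes<[FooALU]> { let Latency = 3; }  // subtarget-specific
//   def : WriteRes<WriteIMul, [FooALU]>        { let Latency = 3; }  // per-processor WriteRes
//
// Exactly one such definition may apply to a given SchedWrite on a processor;
// otherwise a fatal error is reported above.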
/// Find the ReadAdvance record for the given SchedRead on this processor or
/// return NULL.
Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
const CodeGenProcModel &ProcModel) {
// Check for SchedReads that directly specify a ReadAdvance.
if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
return SchedRead.TheDef;
// Check this processor's list of aliases for SchedRead.
Record *AliasDef = nullptr;
for (RecIter AI = SchedRead.Aliases.begin(), AE = SchedRead.Aliases.end();
AI != AE; ++AI) {
const CodeGenSchedRW &AliasRW =
SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
continue;
}
if (AliasDef)
PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
"defined for processor " + ProcModel.ModelName +
" Ensure only one SchedAlias exists per RW.");
AliasDef = AliasRW.TheDef;
}
if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
return AliasDef;
// Check this processor's ReadAdvanceList.
Record *ResDef = nullptr;
for (RecIter RAI = ProcModel.ReadAdvanceDefs.begin(),
RAE = ProcModel.ReadAdvanceDefs.end(); RAI != RAE; ++RAI) {
if (!(*RAI)->isSubClassOf("ReadAdvance"))
continue;
if (AliasDef == (*RAI)->getValueAsDef("ReadType")
|| SchedRead.TheDef == (*RAI)->getValueAsDef("ReadType")) {
if (ResDef) {
PrintFatalError((*RAI)->getLoc(), "Resources are defined for both "
"SchedRead and its alias on processor " +
ProcModel.ModelName);
}
ResDef = *RAI;
}
}
// TODO: If ProcModel has a base model (previous generation processor),
// then call FindReadAdvance recursively with that model here.
if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
PrintFatalError(ProcModel.ModelDef->getLoc(),
std::string("Processor does not define resources for ")
+ SchedRead.TheDef->getName());
}
return ResDef;
}
// Expand an explicit list of processor resources into a full list of implied
// resource groups and super resources that cover them.
void SubtargetEmitter::ExpandProcResources(RecVec &PRVec,
std::vector<int64_t> &Cycles,
const CodeGenProcModel &PM) {
// Default to 1 resource cycle.
Cycles.resize(PRVec.size(), 1);
for (unsigned i = 0, e = PRVec.size(); i != e; ++i) {
Record *PRDef = PRVec[i];
RecVec SubResources;
if (PRDef->isSubClassOf("ProcResGroup"))
SubResources = PRDef->getValueAsListOfDefs("Resources");
else {
SubResources.push_back(PRDef);
PRDef = SchedModels.findProcResUnits(PRVec[i], PM);
for (Record *SubDef = PRDef;
SubDef->getValueInit("Super")->isComplete();) {
if (SubDef->isSubClassOf("ProcResGroup")) {
// Disallow this for simplicity.
PrintFatalError(SubDef->getLoc(), "Processor resource group "
"cannot be a super resource.");
}
Record *SuperDef =
SchedModels.findProcResUnits(SubDef->getValueAsDef("Super"), PM);
PRVec.push_back(SuperDef);
Cycles.push_back(Cycles[i]);
SubDef = SuperDef;
}
}
for (RecIter PRI = PM.ProcResourceDefs.begin(),
PRE = PM.ProcResourceDefs.end();
PRI != PRE; ++PRI) {
if (*PRI == PRDef || !(*PRI)->isSubClassOf("ProcResGroup"))
continue;
RecVec SuperResources = (*PRI)->getValueAsListOfDefs("Resources");
RecIter SubI = SubResources.begin(), SubE = SubResources.end();
for( ; SubI != SubE; ++SubI) {
if (std::find(SuperResources.begin(), SuperResources.end(), *SubI)
== SuperResources.end()) {
break;
}
}
if (SubI == SubE) {
PRVec.push_back(*PRI);
Cycles.push_back(Cycles[i]);
}
}
}
}
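// Example (illustrative TableGen): with
//   def FooP0  : ProcResource<1>;
//   def FooP1  : ProcResource<1>;
//   def FooP01 : ProcResGroup<[FooP0, FooP1]>;
// expanding the explicit list [FooP0] also appends FooP01 with the same cycle
// count, so a write that occupies FooP0 is charged against the combined group
// as well.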
// Generate the SchedClass table for this processor and update global
// tables. Must be called for each processor in order.
void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
SchedClassTables &SchedTables) {
SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
if (!ProcModel.hasInstrSchedModel())
return;
std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
DEBUG(SCI->dump(&SchedModels));
SCTab.resize(SCTab.size() + 1);
MCSchedClassDesc &SCDesc = SCTab.back();
// SCDesc.Name is guarded by NDEBUG
SCDesc.NumMicroOps = 0;
SCDesc.BeginGroup = false;
SCDesc.EndGroup = false;
SCDesc.WriteProcResIdx = 0;
SCDesc.WriteLatencyIdx = 0;
SCDesc.ReadAdvanceIdx = 0;
// A Variant SchedClass has no resources of its own.
bool HasVariants = false;
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = SCI->Transitions.begin(), TE = SCI->Transitions.end();
TI != TE; ++TI) {
if (TI->ProcIndices[0] == 0) {
HasVariants = true;
break;
}
IdxIter PIPos = std::find(TI->ProcIndices.begin(),
TI->ProcIndices.end(), ProcModel.Index);
if (PIPos != TI->ProcIndices.end()) {
HasVariants = true;
break;
}
}
if (HasVariants) {
SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
continue;
}
// Determine if the SchedClass is actually reachable on this processor. If
// not don't try to locate the processor resources, it will fail.
// If ProcIndices contains 0, this class applies to all processors.
assert(!SCI->ProcIndices.empty() && "expect at least one procidx");
if (SCI->ProcIndices[0] != 0) {
IdxIter PIPos = std::find(SCI->ProcIndices.begin(),
SCI->ProcIndices.end(), ProcModel.Index);
if (PIPos == SCI->ProcIndices.end())
continue;
}
IdxVec Writes = SCI->Writes;
IdxVec Reads = SCI->Reads;
if (!SCI->InstRWs.empty()) {
// This class has a default ReadWrite list which can be overridden by
// InstRW definitions.
Record *RWDef = nullptr;
for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
RWI != RWE; ++RWI) {
Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
RWDef = *RWI;
break;
}
}
if (RWDef) {
Writes.clear();
Reads.clear();
SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
Writes, Reads);
}
}
if (Writes.empty()) {
// Check this processor's itinerary class resources.
for (RecIter II = ProcModel.ItinRWDefs.begin(),
IE = ProcModel.ItinRWDefs.end(); II != IE; ++II) {
RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
if (std::find(Matched.begin(), Matched.end(), SCI->ItinClassDef)
!= Matched.end()) {
SchedModels.findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"),
Writes, Reads);
break;
}
}
if (Writes.empty()) {
DEBUG(dbgs() << ProcModel.ModelName
<< " does not have resources for class " << SCI->Name << '\n');
}
}
// Sum resources across all operand writes.
std::vector<MCWriteProcResEntry> WriteProcResources;
std::vector<MCWriteLatencyEntry> WriteLatencies;
std::vector<std::string> WriterNames;
std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) {
IdxVec WriteSeq;
SchedModels.expandRWSeqForProc(*WI, WriteSeq, /*IsRead=*/false,
ProcModel);
// For each operand, create a latency entry.
MCWriteLatencyEntry WLEntry;
WLEntry.Cycles = 0;
unsigned WriteID = WriteSeq.back();
WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
// If this Write is not referenced by a ReadAdvance, don't distinguish it
// from other WriteLatency entries.
if (!SchedModels.hasReadOfWrite(
SchedModels.getSchedWrite(WriteID).TheDef)) {
WriteID = 0;
}
WLEntry.WriteResourceID = WriteID;
for (IdxIter WSI = WriteSeq.begin(), WSE = WriteSeq.end();
WSI != WSE; ++WSI) {
Record *WriteRes =
FindWriteResources(SchedModels.getSchedWrite(*WSI), ProcModel);
// Mark the parent class as invalid for unsupported write types.
if (WriteRes->getValueAsBit("Unsupported")) {
SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
break;
}
WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
// Create an entry for each ProcResource listed in WriteRes.
RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
std::vector<int64_t> Cycles =
WriteRes->getValueAsListOfInts("ResourceCycles");
ExpandProcResources(PRVec, Cycles, ProcModel);
for (unsigned PRIdx = 0, PREnd = PRVec.size();
PRIdx != PREnd; ++PRIdx) {
MCWriteProcResEntry WPREntry;
WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
WPREntry.Cycles = Cycles[PRIdx];
// If this resource is already used in this sequence, add the current
// entry's cycles so that the same resource appears to be used
// serially, rather than multiple parallel uses. This is important for
// in-order machines where the resource consumption is a hazard.
unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
for( ; WPRIdx != WPREnd; ++WPRIdx) {
if (WriteProcResources[WPRIdx].ProcResourceIdx
== WPREntry.ProcResourceIdx) {
WriteProcResources[WPRIdx].Cycles += WPREntry.Cycles;
break;
}
}
if (WPRIdx == WPREnd)
WriteProcResources.push_back(WPREntry);
}
}
WriteLatencies.push_back(WLEntry);
}
// Create an entry for each operand Read in this SchedClass.
// Entries must be sorted first by UseIdx then by WriteResourceID.
for (unsigned UseIdx = 0, EndIdx = Reads.size();
UseIdx != EndIdx; ++UseIdx) {
Record *ReadAdvance =
FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
if (!ReadAdvance)
continue;
// Mark the parent class as invalid for unsupported write types.
if (ReadAdvance->getValueAsBit("Unsupported")) {
SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
break;
}
RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
IdxVec WriteIDs;
if (ValidWrites.empty())
WriteIDs.push_back(0);
else {
for (RecIter VWI = ValidWrites.begin(), VWE = ValidWrites.end();
VWI != VWE; ++VWI) {
WriteIDs.push_back(SchedModels.getSchedRWIdx(*VWI, /*IsRead=*/false));
}
}
std::sort(WriteIDs.begin(), WriteIDs.end());
for(IdxIter WI = WriteIDs.begin(), WE = WriteIDs.end(); WI != WE; ++WI) {
MCReadAdvanceEntry RAEntry;
RAEntry.UseIdx = UseIdx;
RAEntry.WriteResourceID = *WI;
RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
ReadAdvanceEntries.push_back(RAEntry);
}
}
if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
WriteProcResources.clear();
WriteLatencies.clear();
ReadAdvanceEntries.clear();
}
// Add the information for this SchedClass to the global tables using basic
// compression.
//
    // WriteProcRes entries are sorted by ProcResIdx.
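    // Illustrative example (hypothetical values): if this class needs the
    // entries {{1,2},{3,1}} and that exact sequence already occurs
    // contiguously in the global WriteProcResources table, WriteProcResIdx
    // points at the existing position; otherwise the sequence is appended
    // and the new offset is recorded.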
std::sort(WriteProcResources.begin(), WriteProcResources.end(),
LessWriteProcResources());
SCDesc.NumWriteProcResEntries = WriteProcResources.size();
std::vector<MCWriteProcResEntry>::iterator WPRPos =
std::search(SchedTables.WriteProcResources.begin(),
SchedTables.WriteProcResources.end(),
WriteProcResources.begin(), WriteProcResources.end());
if (WPRPos != SchedTables.WriteProcResources.end())
SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
else {
SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
WriteProcResources.end());
}
// Latency entries must remain in operand order.
SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
std::vector<MCWriteLatencyEntry>::iterator WLPos =
std::search(SchedTables.WriteLatencies.begin(),
SchedTables.WriteLatencies.end(),
WriteLatencies.begin(), WriteLatencies.end());
if (WLPos != SchedTables.WriteLatencies.end()) {
unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
SCDesc.WriteLatencyIdx = idx;
for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
std::string::npos) {
SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
}
}
else {
SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
SchedTables.WriteLatencies.insert(SchedTables.WriteLatencies.end(),
WriteLatencies.begin(),
WriteLatencies.end());
SchedTables.WriterNames.insert(SchedTables.WriterNames.end(),
WriterNames.begin(), WriterNames.end());
}
// ReadAdvanceEntries must remain in operand order.
SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
std::vector<MCReadAdvanceEntry>::iterator RAPos =
std::search(SchedTables.ReadAdvanceEntries.begin(),
SchedTables.ReadAdvanceEntries.end(),
ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
if (RAPos != SchedTables.ReadAdvanceEntries.end())
SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
else {
SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
SchedTables.ReadAdvanceEntries.insert(RAPos, ReadAdvanceEntries.begin(),
ReadAdvanceEntries.end());
}
}
}
// Emit SchedClass tables for all processors and associated global tables.
void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
raw_ostream &OS) {
// Emit global WriteProcResTable.
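  // For a hypothetical target "Foo", the emitted table has the shape:
  //   extern const llvm::MCWriteProcResEntry FooWriteProcResTable[] = {
  //     { 0,  0}, // Invalid
  //     { 1,  1}, // #1
  //     ...
  //   };
  // (illustrative only; real indices come from the processor models).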
OS << "\n// {ProcResourceIdx, Cycles}\n"
<< "extern const llvm::MCWriteProcResEntry "
<< Target << "WriteProcResTable[] = {\n"
<< " { 0, 0}, // Invalid\n";
for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
WPRIdx != WPREnd; ++WPRIdx) {
MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
OS << " {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
<< format("%2d", WPREntry.Cycles) << "}";
if (WPRIdx + 1 < WPREnd)
OS << ',';
OS << " // #" << WPRIdx << '\n';
}
OS << "}; // " << Target << "WriteProcResTable\n";
// Emit global WriteLatencyTable.
OS << "\n// {Cycles, WriteResourceID}\n"
<< "extern const llvm::MCWriteLatencyEntry "
<< Target << "WriteLatencyTable[] = {\n"
<< " { 0, 0}, // Invalid\n";
for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
WLIdx != WLEnd; ++WLIdx) {
MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
OS << " {" << format("%2d", WLEntry.Cycles) << ", "
<< format("%2d", WLEntry.WriteResourceID) << "}";
if (WLIdx + 1 < WLEnd)
OS << ',';
OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
}
OS << "}; // " << Target << "WriteLatencyTable\n";
// Emit global ReadAdvanceTable.
OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
<< "extern const llvm::MCReadAdvanceEntry "
<< Target << "ReadAdvanceTable[] = {\n"
<< " {0, 0, 0}, // Invalid\n";
for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
RAIdx != RAEnd; ++RAIdx) {
MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
OS << " {" << RAEntry.UseIdx << ", "
<< format("%2d", RAEntry.WriteResourceID) << ", "
<< format("%2d", RAEntry.Cycles) << "}";
if (RAIdx + 1 < RAEnd)
OS << ',';
OS << " // #" << RAIdx << '\n';
}
OS << "}; // " << Target << "ReadAdvanceTable\n";
// Emit a SchedClass table for each processor.
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
if (!PI->hasInstrSchedModel())
continue;
std::vector<MCSchedClassDesc> &SCTab =
SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];
OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup,"
<< " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
OS << "static const llvm::MCSchedClassDesc "
<< PI->ModelName << "SchedClasses[] = {\n";
    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
assert(SchedModels.getSchedClass(0).Name == "NoInstrModel"
&& "invalid class not first");
OS << " {DBGFIELD(\"InvalidSchedClass\") "
<< MCSchedClassDesc::InvalidNumMicroOps
<< ", 0, 0, 0, 0, 0, 0, 0, 0},\n";
for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
MCSchedClassDesc &MCDesc = SCTab[SCIdx];
const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
OS << " {DBGFIELD(\"" << SchedClass.Name << "\") ";
if (SchedClass.Name.size() < 18)
OS.indent(18 - SchedClass.Name.size());
OS << MCDesc.NumMicroOps
<< ", " << MCDesc.BeginGroup << ", " << MCDesc.EndGroup
<< ", " << format("%2d", MCDesc.WriteProcResIdx)
<< ", " << MCDesc.NumWriteProcResEntries
<< ", " << format("%2d", MCDesc.WriteLatencyIdx)
<< ", " << MCDesc.NumWriteLatencyEntries
<< ", " << format("%2d", MCDesc.ReadAdvanceIdx)
<< ", " << MCDesc.NumReadAdvanceEntries << "}";
if (SCIdx + 1 < SCEnd)
OS << ',';
OS << " // #" << SCIdx << '\n';
}
OS << "}; // " << PI->ModelName << "SchedClasses\n";
}
}
void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
// For each processor model.
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
// Emit processor resource table.
if (PI->hasInstrSchedModel())
EmitProcessorResources(*PI, OS);
else if(!PI->ProcResourceDefs.empty())
PrintFatalError(PI->ModelDef->getLoc(), "SchedMachineModel defines "
"ProcResources without defining WriteRes SchedWriteRes");
// Begin processor itinerary properties
OS << "\n";
OS << "static const llvm::MCSchedModel " << PI->ModelName << " = {\n";
EmitProcessorProp(OS, PI->ModelDef, "IssueWidth", ',');
EmitProcessorProp(OS, PI->ModelDef, "MicroOpBufferSize", ',');
EmitProcessorProp(OS, PI->ModelDef, "LoopMicroOpBufferSize", ',');
EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ',');
OS << " " << (bool)(PI->ModelDef ?
PI->ModelDef->getValueAsBit("PostRAScheduler") : 0)
<< ", // " << "PostRAScheduler\n";
OS << " " << (bool)(PI->ModelDef ?
PI->ModelDef->getValueAsBit("CompleteModel") : 0)
<< ", // " << "CompleteModel\n";
OS << " " << PI->Index << ", // Processor ID\n";
if (PI->hasInstrSchedModel())
OS << " " << PI->ModelName << "ProcResources" << ",\n"
<< " " << PI->ModelName << "SchedClasses" << ",\n"
<< " " << PI->ProcResourceDefs.size()+1 << ",\n"
<< " " << (SchedModels.schedClassEnd()
- SchedModels.schedClassBegin()) << ",\n";
else
OS << " 0, 0, 0, 0, // No instruction-level machine model.\n";
if (PI->hasItineraries())
OS << " " << PI->ItinsDef->getName() << "};\n";
else
OS << " nullptr}; // No Itinerary\n";
}
}
//
// EmitProcessorLookup - generate cpu name to itinerary lookup table.
//
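// Each emitted row pairs a CPU name with a pointer to its scheduling model,
// e.g. (illustrative, hypothetical CPU names):
//   { "generic", (const void *)&NoSchedModel },
//   { "foo-cpu", (const void *)&FooModel },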
void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
// Gather and sort processor information
std::vector<Record*> ProcessorList =
Records.getAllDerivedDefinitions("Processor");
std::sort(ProcessorList.begin(), ProcessorList.end(), LessRecordFieldName());
// Begin processor table
OS << "\n";
OS << "// Sorted (by key) array of itineraries for CPU subtype.\n"
<< "extern const llvm::SubtargetInfoKV "
<< Target << "ProcSchedKV[] = {\n";
// For each processor
for (unsigned i = 0, N = ProcessorList.size(); i < N;) {
// Next processor
Record *Processor = ProcessorList[i];
const std::string &Name = Processor->getValueAsString("Name");
const std::string &ProcModelName =
SchedModels.getModelForProc(Processor).ModelName;
// Emit as { "cpu", procinit },
OS << " { \"" << Name << "\", (const void *)&" << ProcModelName << " }";
    // Emit a trailing comma if more processors follow.
if (++i < N) OS << ",";
OS << "\n";
}
// End processor table
OS << "};\n";
}
//
// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
//
void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
OS << "#ifdef DBGFIELD\n"
<< "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
<< "#endif\n"
<< "#ifndef NDEBUG\n"
<< "#define DBGFIELD(x) x,\n"
<< "#else\n"
<< "#define DBGFIELD(x)\n"
<< "#endif\n";
if (SchedModels.hasItineraries()) {
std::vector<std::vector<InstrItinerary> > ProcItinLists;
// Emit the stage data
EmitStageAndOperandCycleData(OS, ProcItinLists);
EmitItineraries(OS, ProcItinLists);
}
OS << "\n// ===============================================================\n"
<< "// Data tables for the new per-operand machine model.\n";
SchedClassTables SchedTables;
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
GenSchedClassTables(*PI, SchedTables);
}
EmitSchedClassTables(SchedTables, OS);
// Emit the processor machine model
EmitProcessorModels(OS);
// Emit the processor lookup data
EmitProcessorLookup(OS);
OS << "#undef DBGFIELD";
}
void SubtargetEmitter::EmitSchedModelHelpers(std::string ClassName,
raw_ostream &OS) {
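  // The generated resolveSchedClass has roughly this shape (illustrative;
  // all class numbers, processor IDs, and predicates are hypothetical):
  //   switch (SchedClass) {
  //   case 42: // WriteFooVar
  //     if (SchedModel->getProcessorID() == 2) { // FooProcModel
  //       if (SomePredicate(MI))
  //         return 17; // WriteFooFast
  //     }
  //     break;
  //   };
  //   report_fatal_error("Expected a variant SchedClass");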
OS << "unsigned " << ClassName
<< "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
<< " const TargetSchedModel *SchedModel) const {\n";
std::vector<Record*> Prologs = Records.getAllDerivedDefinitions("PredicateProlog");
std::sort(Prologs.begin(), Prologs.end(), LessRecord());
for (std::vector<Record*>::const_iterator
PI = Prologs.begin(), PE = Prologs.end(); PI != PE; ++PI) {
OS << (*PI)->getValueAsString("Code") << '\n';
}
IdxVec VariantClasses;
for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
if (SCI->Transitions.empty())
continue;
VariantClasses.push_back(SCI->Index);
}
if (!VariantClasses.empty()) {
OS << " switch (SchedClass) {\n";
for (IdxIter VCI = VariantClasses.begin(), VCE = VariantClasses.end();
VCI != VCE; ++VCI) {
const CodeGenSchedClass &SC = SchedModels.getSchedClass(*VCI);
OS << " case " << *VCI << ": // " << SC.Name << '\n';
IdxVec ProcIndices;
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = SC.Transitions.begin(), TE = SC.Transitions.end();
TI != TE; ++TI) {
IdxVec PI;
std::set_union(TI->ProcIndices.begin(), TI->ProcIndices.end(),
ProcIndices.begin(), ProcIndices.end(),
std::back_inserter(PI));
ProcIndices.swap(PI);
}
for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
PI != PE; ++PI) {
OS << " ";
if (*PI != 0)
OS << "if (SchedModel->getProcessorID() == " << *PI << ") ";
OS << "{ // " << (SchedModels.procModelBegin() + *PI)->ModelName
<< '\n';
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = SC.Transitions.begin(), TE = SC.Transitions.end();
TI != TE; ++TI) {
if (*PI != 0 && !std::count(TI->ProcIndices.begin(),
TI->ProcIndices.end(), *PI)) {
continue;
}
OS << " if (";
for (RecIter RI = TI->PredTerm.begin(), RE = TI->PredTerm.end();
RI != RE; ++RI) {
if (RI != TI->PredTerm.begin())
OS << "\n && ";
OS << "(" << (*RI)->getValueAsString("Predicate") << ")";
}
OS << ")\n"
<< " return " << TI->ToClassIdx << "; // "
<< SchedModels.getSchedClass(TI->ToClassIdx).Name << '\n';
}
OS << " }\n";
if (*PI == 0)
break;
}
if (SC.isInferred())
OS << " return " << SC.Index << ";\n";
OS << " break;\n";
}
OS << " };\n";
}
OS << " report_fatal_error(\"Expected a variant SchedClass\");\n"
<< "} // " << ClassName << "::resolveSchedClass\n";
}
//
// ParseFeaturesFunction - Produces a subtarget specific function for parsing
// the subtarget features string.
//
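// The emitted body consists of one guarded assignment per feature, e.g.
// (illustrative, hypothetical feature): if (Bits[Foo::FeatureFP]) HasFP = true;
// Numeric attributes instead get a guarded "keep the maximum" update.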
void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
unsigned NumFeatures,
unsigned NumProcs) {
std::vector<Record*> Features =
Records.getAllDerivedDefinitions("SubtargetFeature");
std::sort(Features.begin(), Features.end(), LessRecord());
OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
<< "// subtarget options.\n"
<< "void llvm::";
OS << Target;
OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef FS) {\n"
<< " DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
<< " DEBUG(dbgs() << \"\\nCPU:\" << CPU << \"\\n\\n\");\n";
if (Features.empty()) {
OS << "}\n";
return;
}
OS << " InitMCProcessorInfo(CPU, FS);\n"
<< " const FeatureBitset& Bits = getFeatureBits();\n";
for (unsigned i = 0; i < Features.size(); i++) {
// Next record
Record *R = Features[i];
const std::string &Instance = R->getName();
const std::string &Value = R->getValueAsString("Value");
const std::string &Attribute = R->getValueAsString("Attribute");
if (Value=="true" || Value=="false")
OS << " if (Bits[" << Target << "::"
<< Instance << "]) "
<< Attribute << " = " << Value << ";\n";
else
OS << " if (Bits[" << Target << "::"
<< Instance << "] && "
<< Attribute << " < " << Value << ") "
<< Attribute << " = " << Value << ";\n";
}
OS << "}\n";
}
//
// SubtargetEmitter::run - Main subtarget enumeration emitter.
//
void SubtargetEmitter::run(raw_ostream &OS) {
emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
OS << "#undef GET_SUBTARGETINFO_ENUM\n";
OS << "namespace llvm {\n";
Enumeration(OS, "SubtargetFeature");
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";
OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
OS << "#undef GET_SUBTARGETINFO_MC_DESC\n";
OS << "namespace llvm {\n";
#if 0
OS << "namespace {\n";
#endif
unsigned NumFeatures = FeatureKeyValues(OS);
OS << "\n";
unsigned NumProcs = CPUKeyValues(OS);
OS << "\n";
EmitSchedModel(OS);
OS << "\n";
#if 0
OS << "}\n";
#endif
// MCInstrInfo initialization routine.
OS << "static inline MCSubtargetInfo *create" << Target
<< "MCSubtargetInfoImpl("
<< "const Triple &TT, StringRef CPU, StringRef FS) {\n";
OS << " return new MCSubtargetInfo(TT, CPU, FS, ";
if (NumFeatures)
OS << Target << "FeatureKV, ";
else
OS << "None, ";
if (NumProcs)
OS << Target << "SubTypeKV, ";
else
OS << "None, ";
OS << '\n'; OS.indent(22);
OS << Target << "ProcSchedKV, "
<< Target << "WriteProcResTable, "
<< Target << "WriteLatencyTable, "
<< Target << "ReadAdvanceTable, ";
if (SchedModels.hasItineraries()) {
OS << '\n'; OS.indent(22);
OS << Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths";
} else
OS << "0, 0, 0";
OS << ");\n}\n\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";
OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n";
OS << "#include \"llvm/Support/Debug.h\"\n";
OS << "#include \"llvm/Support/raw_ostream.h\"\n";
ParseFeaturesFunction(OS, NumFeatures, NumProcs);
OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";
// Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
OS << "#undef GET_SUBTARGETINFO_HEADER\n";
std::string ClassName = Target + "GenSubtargetInfo";
OS << "namespace llvm {\n";
OS << "class DFAPacketizer;\n";
OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
<< " explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
<< "StringRef FS);\n"
<< "public:\n"
<< " unsigned resolveSchedClass(unsigned SchedClass, "
<< " const MachineInstr *DefMI,"
<< " const TargetSchedModel *SchedModel) const override;\n"
<< " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
<< " const;\n"
<< "};\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";
OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
OS << "#undef GET_SUBTARGETINFO_CTOR\n";
OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n";
OS << "namespace llvm {\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
OS << "extern const llvm::MCWriteProcResEntry "
<< Target << "WriteProcResTable[];\n";
OS << "extern const llvm::MCWriteLatencyEntry "
<< Target << "WriteLatencyTable[];\n";
OS << "extern const llvm::MCReadAdvanceEntry "
<< Target << "ReadAdvanceTable[];\n";
if (SchedModels.hasItineraries()) {
OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
OS << "extern const unsigned " << Target << "OperandCycles[];\n";
OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
}
OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
<< "StringRef FS)\n"
<< " : TargetSubtargetInfo(TT, CPU, FS, ";
if (NumFeatures)
OS << "makeArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
else
OS << "None, ";
if (NumProcs)
OS << "makeArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
else
OS << "None, ";
OS << '\n'; OS.indent(24);
OS << Target << "ProcSchedKV, "
<< Target << "WriteProcResTable, "
<< Target << "WriteLatencyTable, "
<< Target << "ReadAdvanceTable, ";
OS << '\n'; OS.indent(24);
if (SchedModels.hasItineraries()) {
OS << Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths";
} else
OS << "0, 0, 0";
OS << ") {}\n\n";
EmitSchedModelHelpers(ClassName, OS);
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";
}
namespace llvm {
void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS) {
CodeGenTarget CGTarget(RK);
SubtargetEmitter(RK, CGTarget).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/AsmMatcherEmitter.cpp | //===- AsmMatcherEmitter.cpp - Generate an assembly matcher ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits a target specifier matcher for converting parsed
// assembly operands into MCInst structures. It also emits a matcher for
// custom operand parsing.
//
// Converting assembly operands into MCInst structures
// ---------------------------------------------------
//
// The input to the target specific matcher is a list of literal tokens and
// operands. The target specific parser should generally eliminate any syntax
// which is not relevant for matching; for example, comma tokens should have
// already been consumed and eliminated by the parser. Most instructions will
// end up with a single literal token (the instruction name) and some number of
// operands.
//
// Some example inputs, for X86:
// 'addl' (immediate ...) (register ...)
// 'add' (immediate ...) (memory ...)
// 'call' '*' %epc
//
// The assembly matcher is responsible for converting this input into a precise
// machine instruction (i.e., an instruction with a well defined encoding). This
// mapping has several properties which complicate matching:
//
// - It may be ambiguous; many architectures can legally encode particular
// variants of an instruction in different ways (for example, using a smaller
// encoding for small immediates). Such ambiguities should never be
// arbitrarily resolved by the assembler, the assembler is always responsible
// for choosing the "best" available instruction.
//
// - It may depend on the subtarget or the assembler context. Instructions
// which are invalid for the current mode, but otherwise unambiguous (e.g.,
// an SSE instruction in a file being assembled for i486) should be accepted
// and rejected by the assembler front end. However, if the proper encoding
// for an instruction is dependent on the assembler context then the matcher
// is responsible for selecting the correct machine instruction for the
// current mode.
//
// The core matching algorithm attempts to exploit the regularity in most
// instruction sets to quickly determine the set of possibly matching
// instructions, and to simplify the generated code. Additionally, this helps
// to ensure that the ambiguities are intentionally resolved by the user.
//
// The matching is divided into two distinct phases:
//
// 1. Classification: Each operand is mapped to the unique set which (a)
// contains it, and (b) is the largest such subset for which a single
// instruction could match all members.
//
// For register classes, we can generate these subgroups automatically. For
// arbitrary operands, we expect the user to define the classes and their
// relations to one another (for example, 8-bit signed immediates as a
// subset of 32-bit immediates).
//
// By partitioning the operands in this way, we guarantee that for any
// tuple of classes, any single instruction must match either all or none
// of the sets of operands which could classify to that tuple.
//
// In addition, the subset relation amongst classes induces a partial order
// on such tuples, which we use to resolve ambiguities.
//
// 2. The input can now be treated as a tuple of classes (static tokens are
// simple singleton sets). Each such tuple should generally map to a single
// instruction (we currently ignore cases where this isn't true, whee!!!),
// which we can emit a simple matcher for.
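//    As a concrete (illustrative) example, the X86 input
//      'addl' (immediate ...) (register ...)
//    classifies to the tuple (Token:"addl", Imm, GR32-like register class),
//    which then selects the corresponding register/immediate encoding.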
//
// Custom Operand Parsing
// ----------------------
//
// Some targets need a custom way to parse operands, some specific instructions
// can contain arguments that can represent processor flags and other kinds of
// identifiers that need to be mapped to specific values in the final encoded
// instructions. The target specific custom operand parsing works in the
// following way:
//
// 1. An operand match table is built; each entry contains a mnemonic, an
// operand class, a mask for all operand positions for that same
// class/mnemonic and target features to be checked while trying to match.
//
// 2. The operand matcher will try every possible entry with the same
// mnemonic and will check if the target feature for this mnemonic also
// matches. After that, if the operand to be matched has its index
//    present in the mask, a successful match occurs. Otherwise, fall back
// to the regular operand parsing.
//
// 3. For a match success, each operand class that has a 'ParserMethod'
// becomes part of a switch from where the custom method is called.
//
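//    For example (illustrative, hypothetical names): an operand class with
//    ParserMethod = "parseMyFlagsOperand" yields a table entry keyed by the
//    mnemonic with an operand-position mask and required features; when the
//    mask bit for the current operand index is set, that custom parser runs
//    before the generic operand parser.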
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/StringMatcher.h"
#include "llvm/TableGen/StringToOffsetTable.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <cassert>
#include <cctype>
#include <map>
#include <set>
#include <sstream>
#include <forward_list>
using namespace llvm;
#define DEBUG_TYPE "asm-matcher-emitter"
static cl::opt<std::string>
MatchPrefix("match-prefix", cl::init(""),
cl::desc("Only match instructions with the given prefix"));
namespace {
class AsmMatcherInfo;
struct SubtargetFeatureInfo;
// Register sets are used as keys in some second-order sets TableGen creates
// when generating its data structures. This means that the order of two
// RegisterSets can be seen in the outputted AsmMatcher tables occasionally, and
// can even affect compiler output (at least seen in diagnostics produced when
// all matches fail). So we use a type that sorts them consistently.
typedef std::set<Record*, LessRecordByID> RegisterSet;
class AsmMatcherEmitter {
RecordKeeper &Records;
public:
AsmMatcherEmitter(RecordKeeper &R) : Records(R) {}
void run(raw_ostream &o);
};
/// ClassInfo - Helper class for storing the information about a particular
/// class of operands which can be matched.
struct ClassInfo {
enum ClassInfoKind {
/// Invalid kind, for use as a sentinel value.
Invalid = 0,
/// The class for a particular token.
Token,
/// The (first) register class, subsequent register classes are
/// RegisterClass0+1, and so on.
RegisterClass0,
/// The (first) user defined class, subsequent user defined classes are
/// UserClass0+1, and so on.
UserClass0 = 1<<16
};
/// Kind - The class kind, which is either a predefined kind, or (UserClass0 +
/// N) for the Nth user defined class.
unsigned Kind;
  /// SuperClasses - The super classes of this class. Note that for simplicity's
  /// sake user operands only record their immediate super class, while register
/// operands include all superclasses.
std::vector<ClassInfo*> SuperClasses;
/// Name - The full class name, suitable for use in an enum.
std::string Name;
/// ClassName - The unadorned generic name for this class (e.g., Token).
std::string ClassName;
/// ValueName - The name of the value this class represents; for a token this
/// is the literal token string, for an operand it is the TableGen class (or
/// empty if this is a derived class).
std::string ValueName;
/// PredicateMethod - The name of the operand method to test whether the
/// operand matches this class; this is not valid for Token or register kinds.
std::string PredicateMethod;
/// RenderMethod - The name of the operand method to add this operand to an
/// MCInst; this is not valid for Token or register kinds.
std::string RenderMethod;
/// ParserMethod - The name of the operand method to do a target specific
/// parsing on the operand.
std::string ParserMethod;
/// For register classes: the records for all the registers in this class.
RegisterSet Registers;
/// For custom match classes: the diagnostic kind for when the predicate fails.
std::string DiagnosticType;
public:
/// isRegisterClass() - Check if this is a register class.
bool isRegisterClass() const {
return Kind >= RegisterClass0 && Kind < UserClass0;
}
/// isUserClass() - Check if this is a user defined class.
bool isUserClass() const {
return Kind >= UserClass0;
}
/// isRelatedTo - Check whether this class is "related" to \p RHS. Classes
/// are related if they are in the same class hierarchy.
bool isRelatedTo(const ClassInfo &RHS) const {
// Tokens are only related to tokens.
if (Kind == Token || RHS.Kind == Token)
return Kind == Token && RHS.Kind == Token;
    // Register classes are only related to register classes, and only if
// their intersection is non-empty.
if (isRegisterClass() || RHS.isRegisterClass()) {
if (!isRegisterClass() || !RHS.isRegisterClass())
return false;
RegisterSet Tmp;
std::insert_iterator<RegisterSet> II(Tmp, Tmp.begin());
std::set_intersection(Registers.begin(), Registers.end(),
RHS.Registers.begin(), RHS.Registers.end(),
II, LessRecordByID());
return !Tmp.empty();
}
    // Otherwise we have two user operands; they are related if they are in the
// same class hierarchy.
//
// FIXME: This is an oversimplification, they should only be related if they
// intersect, however we don't have that information.
assert(isUserClass() && RHS.isUserClass() && "Unexpected class!");
const ClassInfo *Root = this;
while (!Root->SuperClasses.empty())
Root = Root->SuperClasses.front();
const ClassInfo *RHSRoot = &RHS;
while (!RHSRoot->SuperClasses.empty())
RHSRoot = RHSRoot->SuperClasses.front();
return Root == RHSRoot;
}
/// isSubsetOf - Test whether this class is a subset of \p RHS.
bool isSubsetOf(const ClassInfo &RHS) const {
// This is a subset of RHS if it is the same class...
if (this == &RHS)
return true;
// ... or if any of its super classes are a subset of RHS.
for (const ClassInfo *CI : SuperClasses)
if (CI->isSubsetOf(RHS))
return true;
return false;
}
/// operator< - Compare two classes.
// FIXME: This ordering seems to be broken. For example:
// u64 < i64, i64 < s8, s8 < u64, forming a cycle
// u64 is a subset of i64
// i64 and s8 are not subsets of each other, so are ordered by name
// s8 and u64 are not subsets of each other, so are ordered by name
bool operator<(const ClassInfo &RHS) const {
if (this == &RHS)
return false;
// Unrelated classes can be ordered by kind.
if (!isRelatedTo(RHS))
return Kind < RHS.Kind;
switch (Kind) {
case Invalid:
llvm_unreachable("Invalid kind!");
default:
// This class precedes the RHS if it is a proper subset of the RHS.
if (isSubsetOf(RHS))
return true;
if (RHS.isSubsetOf(*this))
return false;
// Otherwise, order by name to ensure we have a total ordering.
return ValueName < RHS.ValueName;
}
}
};
/// MatchableInfo - Helper class for storing the necessary information for an
/// instruction or alias which is capable of being matched.
struct MatchableInfo {
struct AsmOperand {
/// Token - This is the token that the operand came from.
StringRef Token;
/// The unique class instance this operand should match.
ClassInfo *Class;
/// The operand name this is, if anything.
StringRef SrcOpName;
/// The suboperand index within SrcOpName, or -1 for the entire operand.
int SubOpIdx;
/// Whether the token is "isolated", i.e., it is preceded and followed
/// by separators.
bool IsIsolatedToken;
/// Register record if this token is singleton register.
Record *SingletonReg;
explicit AsmOperand(bool IsIsolatedToken, StringRef T)
: Token(T), Class(nullptr), SubOpIdx(-1),
IsIsolatedToken(IsIsolatedToken), SingletonReg(nullptr) {}
};
/// ResOperand - This represents a single operand in the result instruction
/// generated by the match. In cases (like addressing modes) where a single
/// assembler operand expands to multiple MCOperands, this represents the
/// single assembler operand, not the MCOperand.
struct ResOperand {
enum {
/// RenderAsmOperand - This represents an operand result that is
/// generated by calling the render method on the assembly operand. The
/// corresponding AsmOperand is specified by AsmOperandNum.
RenderAsmOperand,
/// TiedOperand - This represents a result operand that is a duplicate of
/// a previous result operand.
TiedOperand,
/// ImmOperand - This represents an immediate value that is dumped into
/// the operand.
ImmOperand,
/// RegOperand - This represents a fixed register that is dumped in.
RegOperand
} Kind;
union {
/// This is the operand # in the AsmOperands list that this should be
/// copied from.
unsigned AsmOperandNum;
/// TiedOperandNum - This is the (earlier) result operand that should be
/// copied from.
unsigned TiedOperandNum;
/// ImmVal - This is the immediate value added to the instruction.
int64_t ImmVal;
/// Register - This is the register record.
Record *Register;
};
/// MINumOperands - The number of MCInst operands populated by this
/// operand.
unsigned MINumOperands;
static ResOperand getRenderedOp(unsigned AsmOpNum, unsigned NumOperands) {
ResOperand X;
X.Kind = RenderAsmOperand;
X.AsmOperandNum = AsmOpNum;
X.MINumOperands = NumOperands;
return X;
}
static ResOperand getTiedOp(unsigned TiedOperandNum) {
ResOperand X;
X.Kind = TiedOperand;
X.TiedOperandNum = TiedOperandNum;
X.MINumOperands = 1;
return X;
}
static ResOperand getImmOp(int64_t Val) {
ResOperand X;
X.Kind = ImmOperand;
X.ImmVal = Val;
X.MINumOperands = 1;
return X;
}
static ResOperand getRegOp(Record *Reg) {
ResOperand X;
X.Kind = RegOperand;
X.Register = Reg;
X.MINumOperands = 1;
return X;
}
};
/// AsmVariantID - Target's assembly syntax variant no.
int AsmVariantID;
/// AsmString - The assembly string for this instruction (with variants
/// removed), e.g. "movsx $src, $dst".
std::string AsmString;
/// TheDef - This is the definition of the instruction or InstAlias that this
/// matchable came from.
Record *const TheDef;
/// DefRec - This is the definition that it came from.
PointerUnion<const CodeGenInstruction*, const CodeGenInstAlias*> DefRec;
const CodeGenInstruction *getResultInst() const {
if (DefRec.is<const CodeGenInstruction*>())
return DefRec.get<const CodeGenInstruction*>();
return DefRec.get<const CodeGenInstAlias*>()->ResultInst;
}
/// ResOperands - This is the operand list that should be built for the result
/// MCInst.
SmallVector<ResOperand, 8> ResOperands;
/// Mnemonic - This is the first token of the matched instruction, its
/// mnemonic.
StringRef Mnemonic;
/// AsmOperands - The textual operands that this instruction matches,
/// annotated with a class and where in the OperandList they were defined.
/// This directly corresponds to the tokenized AsmString after the mnemonic is
/// removed.
SmallVector<AsmOperand, 8> AsmOperands;
/// Predicates - The required subtarget features to match this instruction.
SmallVector<const SubtargetFeatureInfo *, 4> RequiredFeatures;
/// ConversionFnKind - The enum value which is passed to the generated
/// convertToMCInst to convert parsed operands into an MCInst for this
/// function.
std::string ConversionFnKind;
/// If this instruction is deprecated in some form.
bool HasDeprecation;
  /// If this is an alias, this is used to determine whether or not to use
  /// the conversion function defined by the instruction's AsmMatchConverter
/// or to use the function generated by the alias.
bool UseInstAsmMatchConverter;
MatchableInfo(const CodeGenInstruction &CGI)
: AsmVariantID(0), AsmString(CGI.AsmString), TheDef(CGI.TheDef), DefRec(&CGI),
UseInstAsmMatchConverter(true) {
}
MatchableInfo(std::unique_ptr<const CodeGenInstAlias> Alias)
: AsmVariantID(0), AsmString(Alias->AsmString), TheDef(Alias->TheDef),
DefRec(Alias.release()),
UseInstAsmMatchConverter(
TheDef->getValueAsBit("UseInstAsmMatchConverter")) {
}
~MatchableInfo() {
delete DefRec.dyn_cast<const CodeGenInstAlias*>();
}
// Two-operand aliases clone from the main matchable, but mark the second
// operand as a tied operand of the first for purposes of the assembler.
void formTwoOperandAlias(StringRef Constraint);
void initialize(const AsmMatcherInfo &Info,
SmallPtrSetImpl<Record*> &SingletonRegisters,
int AsmVariantNo, std::string &RegisterPrefix);
/// validate - Return true if this matchable is a valid thing to match against
/// and perform a bunch of validity checking.
bool validate(StringRef CommentDelimiter, bool Hack) const;
/// extractSingletonRegisterForAsmOperand - Extract singleton register,
/// if present, from specified token.
void
extractSingletonRegisterForAsmOperand(unsigned i, const AsmMatcherInfo &Info,
std::string &RegisterPrefix);
/// findAsmOperand - Find the AsmOperand with the specified name and
/// suboperand index.
int findAsmOperand(StringRef N, int SubOpIdx) const {
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
if (N == AsmOperands[i].SrcOpName &&
SubOpIdx == AsmOperands[i].SubOpIdx)
return i;
return -1;
}
/// findAsmOperandNamed - Find the first AsmOperand with the specified name.
/// This does not check the suboperand index.
int findAsmOperandNamed(StringRef N) const {
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
if (N == AsmOperands[i].SrcOpName)
return i;
return -1;
}
void buildInstructionResultOperands();
void buildAliasResultOperands();
/// operator< - Compare two matchables.
bool operator<(const MatchableInfo &RHS) const {
// The primary comparator is the instruction mnemonic.
if (Mnemonic != RHS.Mnemonic)
return Mnemonic < RHS.Mnemonic;
if (AsmOperands.size() != RHS.AsmOperands.size())
return AsmOperands.size() < RHS.AsmOperands.size();
// Compare lexicographically by operand. The matcher validates that other
// orderings wouldn't be ambiguous using \see couldMatchAmbiguouslyWith().
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
return true;
if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
return false;
}
// Give matches that require more features higher precedence. This is useful
// because we cannot define AssemblerPredicates with the negation of
// processor features. For example, ARM v6 "nop" may be either a HINT or
// MOV. With v6, we want to match HINT. The assembler has no way to
// predicate MOV under "NoV6", but HINT will always match first because it
// requires V6 while MOV does not.
if (RequiredFeatures.size() != RHS.RequiredFeatures.size())
return RequiredFeatures.size() > RHS.RequiredFeatures.size();
return false;
}
/// couldMatchAmbiguouslyWith - Check whether this matchable could
/// ambiguously match the same set of operands as \p RHS (without being a
/// strictly superior match).
bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) const {
// The primary comparator is the instruction mnemonic.
if (Mnemonic != RHS.Mnemonic)
return false;
// The number of operands is unambiguous.
if (AsmOperands.size() != RHS.AsmOperands.size())
return false;
// Otherwise, make sure the ordering of the two instructions is unambiguous
// by checking that either (a) a token or operand kind discriminates them,
// or (b) the ordering among equivalent kinds is consistent.
// Tokens and operand kinds are unambiguous (assuming a correct target
// specific parser).
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
if (AsmOperands[i].Class->Kind != RHS.AsmOperands[i].Class->Kind ||
AsmOperands[i].Class->Kind == ClassInfo::Token)
if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class ||
*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
return false;
// Otherwise, this operand could commute if all operands are equivalent, or
// there is a pair of operands that compare less than and a pair that
// compare greater than.
bool HasLT = false, HasGT = false;
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
HasLT = true;
if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
HasGT = true;
}
return !(HasLT ^ HasGT);
}
void dump() const;
private:
void tokenizeAsmString(const AsmMatcherInfo &Info);
void addAsmOperand(size_t Start, size_t End);
};
/// SubtargetFeatureInfo - Helper class for storing information on a subtarget
/// feature which participates in instruction matching.
struct SubtargetFeatureInfo {
/// \brief The predicate record for this feature.
Record *TheDef;
  /// \brief A unique index assigned to represent this feature.
uint64_t Index;
SubtargetFeatureInfo(Record *D, uint64_t Idx) : TheDef(D), Index(Idx) {}
/// \brief The name of the enumerated constant identifying this feature.
std::string getEnumName() const {
return "Feature_" + TheDef->getName();
}
void dump() const {
errs() << getEnumName() << " " << Index << "\n";
TheDef->dump();
}
};
struct OperandMatchEntry {
unsigned OperandMask;
const MatchableInfo* MI;
ClassInfo *CI;
static OperandMatchEntry create(const MatchableInfo *mi, ClassInfo *ci,
unsigned opMask) {
OperandMatchEntry X;
X.OperandMask = opMask;
X.CI = ci;
X.MI = mi;
return X;
}
};
class AsmMatcherInfo {
public:
/// Tracked Records
RecordKeeper &Records;
/// The tablegen AsmParser record.
Record *AsmParser;
/// Target - The target information.
CodeGenTarget &Target;
/// The classes which are needed for matching.
std::forward_list<ClassInfo> Classes;
/// The information on the matchables to match.
std::vector<std::unique_ptr<MatchableInfo>> Matchables;
/// Info for custom matching operands by user defined methods.
std::vector<OperandMatchEntry> OperandMatchInfo;
/// Map of Register records to their class information.
typedef std::map<Record*, ClassInfo*, LessRecordByID> RegisterClassesTy;
RegisterClassesTy RegisterClasses;
/// Map of Predicate records to their subtarget information.
std::map<Record *, SubtargetFeatureInfo, LessRecordByID> SubtargetFeatures;
/// Map of AsmOperandClass records to their class information.
std::map<Record*, ClassInfo*> AsmOperandClasses;
private:
/// Map of token to class information which has already been constructed.
std::map<std::string, ClassInfo*> TokenClasses;
/// Map of RegisterClass records to their class information.
std::map<Record*, ClassInfo*> RegisterClassClasses;
private:
/// getTokenClass - Lookup or create the class for the given token.
ClassInfo *getTokenClass(StringRef Token);
/// getOperandClass - Lookup or create the class for the given operand.
ClassInfo *getOperandClass(const CGIOperandList::OperandInfo &OI,
int SubOpIdx);
ClassInfo *getOperandClass(Record *Rec, int SubOpIdx);
/// buildRegisterClasses - Build the ClassInfo* instances for register
/// classes.
void buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters);
/// buildOperandClasses - Build the ClassInfo* instances for user defined
/// operand classes.
void buildOperandClasses();
void buildInstructionOperandReference(MatchableInfo *II, StringRef OpName,
unsigned AsmOpIdx);
void buildAliasOperandReference(MatchableInfo *II, StringRef OpName,
MatchableInfo::AsmOperand &Op);
public:
AsmMatcherInfo(Record *AsmParser,
CodeGenTarget &Target,
RecordKeeper &Records);
/// buildInfo - Construct the various tables used during matching.
void buildInfo();
/// buildOperandMatchInfo - Build the necessary information to handle user
/// defined operand parsing methods.
void buildOperandMatchInfo();
/// getSubtargetFeature - Lookup or create the subtarget feature info for the
/// given operand.
const SubtargetFeatureInfo *getSubtargetFeature(Record *Def) const {
assert(Def->isSubClassOf("Predicate") && "Invalid predicate type!");
const auto &I = SubtargetFeatures.find(Def);
return I == SubtargetFeatures.end() ? nullptr : &I->second;
}
RecordKeeper &getRecords() const {
return Records;
}
};
} // End anonymous namespace
void MatchableInfo::dump() const {
errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
const AsmOperand &Op = AsmOperands[i];
errs() << " op[" << i << "] = " << Op.Class->ClassName << " - ";
errs() << '\"' << Op.Token << "\"\n";
}
}
static std::pair<StringRef, StringRef>
parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
// Split via the '='.
std::pair<StringRef, StringRef> Ops = S.split('=');
if (Ops.second == "")
PrintFatalError(Loc, "missing '=' in two-operand alias constraint");
// Trim whitespace and the leading '$' on the operand names.
size_t start = Ops.first.find_first_of('$');
if (start == std::string::npos)
PrintFatalError(Loc, "expected '$' prefix on asm operand name");
Ops.first = Ops.first.slice(start + 1, std::string::npos);
size_t end = Ops.first.find_last_of(" \t");
Ops.first = Ops.first.slice(0, end);
// Now the second operand.
start = Ops.second.find_first_of('$');
if (start == std::string::npos)
PrintFatalError(Loc, "expected '$' prefix on asm operand name");
Ops.second = Ops.second.slice(start + 1, std::string::npos);
end = Ops.second.find_last_of(" \t");
  Ops.second = Ops.second.slice(0, end);
return Ops;
}
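// For a constraint such as "$lhs = $rhs" (names illustrative), the AsmOperand
// named on the left-hand side is removed below, and any result operand that
// rendered from it is redirected to the operand named on the right-hand side.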
void MatchableInfo::formTwoOperandAlias(StringRef Constraint) {
// Figure out which operands are aliased and mark them as tied.
std::pair<StringRef, StringRef> Ops =
parseTwoOperandConstraint(Constraint, TheDef->getLoc());
// Find the AsmOperands that refer to the operands we're aliasing.
int SrcAsmOperand = findAsmOperandNamed(Ops.first);
int DstAsmOperand = findAsmOperandNamed(Ops.second);
if (SrcAsmOperand == -1)
PrintFatalError(TheDef->getLoc(),
"unknown source two-operand alias operand '" + Ops.first +
"'.");
if (DstAsmOperand == -1)
PrintFatalError(TheDef->getLoc(),
"unknown destination two-operand alias operand '" +
Ops.second + "'.");
// Find the ResOperand that refers to the operand we're aliasing away
// and update it to refer to the combined operand instead.
for (unsigned i = 0, e = ResOperands.size(); i != e; ++i) {
ResOperand &Op = ResOperands[i];
if (Op.Kind == ResOperand::RenderAsmOperand &&
Op.AsmOperandNum == (unsigned)SrcAsmOperand) {
Op.AsmOperandNum = DstAsmOperand;
break;
}
}
// Remove the AsmOperand for the alias operand.
AsmOperands.erase(AsmOperands.begin() + SrcAsmOperand);
// Adjust the ResOperand references to any AsmOperands that followed
// the one we just deleted.
for (unsigned i = 0, e = ResOperands.size(); i != e; ++i) {
ResOperand &Op = ResOperands[i];
switch(Op.Kind) {
default:
// Nothing to do for operands that don't reference AsmOperands.
break;
case ResOperand::RenderAsmOperand:
if (Op.AsmOperandNum > (unsigned)SrcAsmOperand)
--Op.AsmOperandNum;
break;
case ResOperand::TiedOperand:
if (Op.TiedOperandNum > (unsigned)SrcAsmOperand)
--Op.TiedOperandNum;
break;
}
}
}
void MatchableInfo::initialize(const AsmMatcherInfo &Info,
SmallPtrSetImpl<Record*> &SingletonRegisters,
int AsmVariantNo, std::string &RegisterPrefix) {
AsmVariantID = AsmVariantNo;
AsmString =
CodeGenInstruction::FlattenAsmStringVariants(AsmString, AsmVariantNo);
tokenizeAsmString(Info);
// Compute the require features.
std::vector<Record*> Predicates =TheDef->getValueAsListOfDefs("Predicates");
for (unsigned i = 0, e = Predicates.size(); i != e; ++i)
if (const SubtargetFeatureInfo *Feature =
Info.getSubtargetFeature(Predicates[i]))
RequiredFeatures.push_back(Feature);
// Collect singleton registers, if used.
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
extractSingletonRegisterForAsmOperand(i, Info, RegisterPrefix);
if (Record *Reg = AsmOperands[i].SingletonReg)
SingletonRegisters.insert(Reg);
}
const RecordVal *DepMask = TheDef->getValue("DeprecatedFeatureMask");
if (!DepMask)
DepMask = TheDef->getValue("ComplexDeprecationPredicate");
HasDeprecation =
DepMask ? !DepMask->getValue()->getAsUnquotedString().empty() : false;
}
/// Append an AsmOperand for the given substring of AsmString.
void MatchableInfo::addAsmOperand(size_t Start, size_t End) {
StringRef String = AsmString;
StringRef Separators = "[]*! \t,";
  // Look for separators before and after to figure out if this token is
// isolated. Accept '$$' as that's how we escape '$'.
bool IsIsolatedToken =
(!Start || Separators.find(String[Start - 1]) != StringRef::npos ||
String.substr(Start - 1, 2) == "$$") &&
(End >= String.size() || Separators.find(String[End]) != StringRef::npos);
AsmOperands.push_back(AsmOperand(IsIsolatedToken, String.slice(Start, End)));
}
/// tokenizeAsmString - Tokenize a simplified assembly string.
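/// For example (illustrative), "ldr $Rt, [$Rn, $Rm]" tokenizes to
/// "ldr", "$Rt", "[", "$Rn", "$Rm", "]": whitespace and commas only separate
/// tokens, while '[', ']', '*' and '!' become isolated single-character tokens.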
void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) {
StringRef String = AsmString;
unsigned Prev = 0;
bool InTok = true;
for (unsigned i = 0, e = String.size(); i != e; ++i) {
switch (String[i]) {
case '[':
case ']':
case '*':
case '!':
case ' ':
case '\t':
case ',':
if (InTok) {
addAsmOperand(Prev, i);
InTok = false;
}
if (!isspace(String[i]) && String[i] != ',')
addAsmOperand(i, i + 1);
Prev = i + 1;
break;
case '\\':
if (InTok) {
addAsmOperand(Prev, i);
InTok = false;
}
++i;
assert(i != String.size() && "Invalid quoted character");
addAsmOperand(i, i + 1);
Prev = i + 1;
break;
case '$': {
if (InTok) {
addAsmOperand(Prev, i);
InTok = false;
}
// If this isn't "${", treat like a normal token.
if (i + 1 == String.size() || String[i + 1] != '{') {
Prev = i;
break;
}
StringRef::iterator End = std::find(String.begin() + i, String.end(),'}');
assert(End != String.end() && "Missing brace in operand reference!");
size_t EndPos = End - String.begin();
addAsmOperand(i, EndPos+1);
Prev = EndPos + 1;
i = EndPos;
break;
}
case '.':
if (!Info.AsmParser->getValueAsBit("MnemonicContainsDot")) {
if (InTok)
addAsmOperand(Prev, i);
Prev = i;
}
InTok = true;
break;
default:
InTok = true;
}
}
if (InTok && Prev != String.size())
addAsmOperand(Prev, StringRef::npos);
// The first token of the instruction is the mnemonic, which must be a
// simple string, not a $foo variable or a singleton register.
if (AsmOperands.empty())
PrintFatalError(TheDef->getLoc(),
"Instruction '" + TheDef->getName() + "' has no tokens");
Mnemonic = AsmOperands[0].Token;
if (Mnemonic.empty())
PrintFatalError(TheDef->getLoc(),
"Missing instruction mnemonic");
// FIXME : Check and raise an error if it is a register.
if (Mnemonic[0] == '$')
PrintFatalError(TheDef->getLoc(),
"Invalid instruction mnemonic '" + Mnemonic + "'!");
// Remove the first operand, it is tracked in the mnemonic field.
AsmOperands.erase(AsmOperands.begin());
}
bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
// Reject matchables with no .s string.
if (AsmString.empty())
PrintFatalError(TheDef->getLoc(), "instruction with empty asm string");
// Reject any matchables with a newline in them, they should be marked
// isCodeGenOnly if they are pseudo instructions.
if (AsmString.find('\n') != std::string::npos)
PrintFatalError(TheDef->getLoc(),
"multiline instruction is not valid for the asmparser, "
"mark it isCodeGenOnly");
// Remove comments from the asm string. We know that the asmstring only
// has one line.
if (!CommentDelimiter.empty() &&
StringRef(AsmString).find(CommentDelimiter) != StringRef::npos)
PrintFatalError(TheDef->getLoc(),
"asmstring for instruction has comment character in it, "
"mark it isCodeGenOnly");
// Reject matchables with operand modifiers, these aren't something we can
// handle, the target should be refactored to use operands instead of
// modifiers.
//
// Also, check for instructions which reference the operand multiple times;
// this implies a constraint we would not honor.
std::set<std::string> OperandNames;
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
StringRef Tok = AsmOperands[i].Token;
if (Tok[0] == '$' && Tok.find(':') != StringRef::npos)
PrintFatalError(TheDef->getLoc(),
"matchable with operand modifier '" + Tok +
"' not supported by asm matcher. Mark isCodeGenOnly!");
// Verify that any operand is only mentioned once.
// We reject aliases and ignore instructions for now.
if (Tok[0] == '$' && !OperandNames.insert(Tok).second) {
if (!Hack)
PrintFatalError(TheDef->getLoc(),
"ERROR: matchable with tied operand '" + Tok +
"' can never be matched!");
// FIXME: Should reject these. The ARM backend hits this with $lane in a
// bunch of instructions. It is unclear what the right answer is.
DEBUG({
errs() << "warning: '" << TheDef->getName() << "': "
<< "ignoring instruction with tied operand '"
<< Tok << "'\n";
});
return false;
}
}
return true;
}
/// extractSingletonRegisterForAsmOperand - Extract singleton register,
/// if present, from specified token.
void MatchableInfo::
extractSingletonRegisterForAsmOperand(unsigned OperandNo,
const AsmMatcherInfo &Info,
std::string &RegisterPrefix) {
StringRef Tok = AsmOperands[OperandNo].Token;
// If this token is not an isolated token, i.e., it isn't separated from
// other tokens (e.g. with whitespace), don't interpret it as a register name.
if (!AsmOperands[OperandNo].IsIsolatedToken)
return;
if (RegisterPrefix.empty()) {
std::string LoweredTok = Tok.lower();
if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(LoweredTok))
AsmOperands[OperandNo].SingletonReg = Reg->TheDef;
return;
}
if (!Tok.startswith(RegisterPrefix))
return;
StringRef RegName = Tok.substr(RegisterPrefix.size());
if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(RegName))
AsmOperands[OperandNo].SingletonReg = Reg->TheDef;
// If there is no register prefix (i.e. "%" in "%eax"), then this may
// be some random non-register token, just ignore it.
return;
}
static std::string getEnumNameForToken(StringRef Str) {
std::string Res;
for (StringRef::iterator it = Str.begin(), ie = Str.end(); it != ie; ++it) {
switch (*it) {
case '*': Res += "_STAR_"; break;
case '%': Res += "_PCT_"; break;
case ':': Res += "_COLON_"; break;
case '!': Res += "_EXCLAIM_"; break;
case '.': Res += "_DOT_"; break;
case '<': Res += "_LT_"; break;
case '>': Res += "_GT_"; break;
case '-': Res += "_MINUS_"; break;
default:
if ((*it >= 'A' && *it <= 'Z') ||
(*it >= 'a' && *it <= 'z') ||
(*it >= '0' && *it <= '9'))
Res += *it;
else
Res += "_" + utostr((unsigned) *it) + "_";
}
}
return Res;
}
ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
ClassInfo *&Entry = TokenClasses[Token];
if (!Entry) {
Classes.emplace_front();
Entry = &Classes.front();
Entry->Kind = ClassInfo::Token;
Entry->ClassName = "Token";
Entry->Name = "MCK_" + getEnumNameForToken(Token);
Entry->ValueName = Token;
Entry->PredicateMethod = "<invalid>";
Entry->RenderMethod = "<invalid>";
Entry->ParserMethod = "";
Entry->DiagnosticType = "";
}
return Entry;
}
ClassInfo *
AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI,
int SubOpIdx) {
Record *Rec = OI.Rec;
if (SubOpIdx != -1)
Rec = cast<DefInit>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
return getOperandClass(Rec, SubOpIdx);
}
ClassInfo *
AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
if (Rec->isSubClassOf("RegisterOperand")) {
// RegisterOperand may have an associated ParserMatchClass. If it does,
// use it, else just fall back to the underlying register class.
const RecordVal *R = Rec->getValue("ParserMatchClass");
if (!R || !R->getValue())
PrintFatalError("Record `" + Rec->getName() +
"' does not have a ParserMatchClass!\n");
if (DefInit *DI= dyn_cast<DefInit>(R->getValue())) {
Record *MatchClass = DI->getDef();
if (ClassInfo *CI = AsmOperandClasses[MatchClass])
return CI;
}
// No custom match class. Just use the register class.
Record *ClassRec = Rec->getValueAsDef("RegClass");
if (!ClassRec)
PrintFatalError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
"' has no associated register class!\n");
if (ClassInfo *CI = RegisterClassClasses[ClassRec])
return CI;
PrintFatalError(Rec->getLoc(), "register class has no class info!");
}
if (Rec->isSubClassOf("RegisterClass")) {
if (ClassInfo *CI = RegisterClassClasses[Rec])
return CI;
PrintFatalError(Rec->getLoc(), "register class has no class info!");
}
if (!Rec->isSubClassOf("Operand"))
PrintFatalError(Rec->getLoc(), "Operand `" + Rec->getName() +
"' does not derive from class Operand!\n");
Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
if (ClassInfo *CI = AsmOperandClasses[MatchClass])
return CI;
PrintFatalError(Rec->getLoc(), "operand has no match class!");
}
struct LessRegisterSet {
bool operator() (const RegisterSet &LHS, const RegisterSet & RHS) const {
    // std::set<T> defines its own comparison "operator<", but it
// performs a lexicographical comparison by T's innate comparison
// for some reason. We don't want non-deterministic pointer
// comparisons so use this instead.
return std::lexicographical_compare(LHS.begin(), LHS.end(),
RHS.begin(), RHS.end(),
LessRecordByID());
}
};
void AsmMatcherInfo::
buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters) {
const auto &Registers = Target.getRegBank().getRegisters();
auto &RegClassList = Target.getRegBank().getRegClasses();
typedef std::set<RegisterSet, LessRegisterSet> RegisterSetSet;
// The register sets used for matching.
RegisterSetSet RegisterSets;
// Gather the defined sets.
for (const CodeGenRegisterClass &RC : RegClassList)
RegisterSets.insert(
RegisterSet(RC.getOrder().begin(), RC.getOrder().end()));
// Add any required singleton sets.
for (Record *Rec : SingletonRegisters) {
RegisterSets.insert(RegisterSet(&Rec, &Rec + 1));
}
// Introduce derived sets where necessary (when a register does not determine
// a unique register set class), and build the mapping of registers to the set
// they should classify to.
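  // Illustrative: a register that belongs to both a general 32-bit class and
  // a more restricted subclass classifies to the intersection of their
  // register orders, which is inserted as a derived set if it is not already
  // one of the defined sets.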
std::map<Record*, RegisterSet> RegisterMap;
for (const CodeGenRegister &CGR : Registers) {
// Compute the intersection of all sets containing this register.
RegisterSet ContainingSet;
for (const RegisterSet &RS : RegisterSets) {
if (!RS.count(CGR.TheDef))
continue;
if (ContainingSet.empty()) {
ContainingSet = RS;
continue;
}
RegisterSet Tmp;
std::swap(Tmp, ContainingSet);
std::insert_iterator<RegisterSet> II(ContainingSet,
ContainingSet.begin());
std::set_intersection(Tmp.begin(), Tmp.end(), RS.begin(), RS.end(), II,
LessRecordByID());
}
if (!ContainingSet.empty()) {
RegisterSets.insert(ContainingSet);
RegisterMap.insert(std::make_pair(CGR.TheDef, ContainingSet));
}
}
// Construct the register classes.
std::map<RegisterSet, ClassInfo*, LessRegisterSet> RegisterSetClasses;
unsigned Index = 0;
for (const RegisterSet &RS : RegisterSets) {
Classes.emplace_front();
ClassInfo *CI = &Classes.front();
CI->Kind = ClassInfo::RegisterClass0 + Index;
CI->ClassName = "Reg" + utostr(Index);
CI->Name = "MCK_Reg" + utostr(Index);
CI->ValueName = "";
CI->PredicateMethod = ""; // unused
CI->RenderMethod = "addRegOperands";
CI->Registers = RS;
// FIXME: diagnostic type.
CI->DiagnosticType = "";
RegisterSetClasses.insert(std::make_pair(RS, CI));
++Index;
}
// Find the superclasses; we could compute only the subgroup lattice edges,
// but there isn't really a point.
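// For example, with hypothetical sets, {R0, R1} is a strict subset of
// {R0, R1, R2, R3}, so the class built for {R0, R1} lists the class for
// {R0, R1, R2, R3} among its superclasses.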
for (const RegisterSet &RS : RegisterSets) {
ClassInfo *CI = RegisterSetClasses[RS];
for (const RegisterSet &RS2 : RegisterSets)
if (RS != RS2 &&
std::includes(RS2.begin(), RS2.end(), RS.begin(), RS.end(),
LessRecordByID()))
CI->SuperClasses.push_back(RegisterSetClasses[RS2]);
}
// Name the register classes which correspond to a user defined RegisterClass.
for (const CodeGenRegisterClass &RC : RegClassList) {
// Def will be NULL for non-user defined register classes.
Record *Def = RC.getDef();
if (!Def)
continue;
ClassInfo *CI = RegisterSetClasses[RegisterSet(RC.getOrder().begin(),
RC.getOrder().end())];
if (CI->ValueName.empty()) {
CI->ClassName = RC.getName();
CI->Name = "MCK_" + RC.getName();
CI->ValueName = RC.getName();
} else
CI->ValueName = CI->ValueName + "," + RC.getName();
RegisterClassClasses.insert(std::make_pair(Def, CI));
}
// Populate the map for individual registers.
for (std::map<Record*, RegisterSet>::iterator it = RegisterMap.begin(),
ie = RegisterMap.end(); it != ie; ++it)
RegisterClasses[it->first] = RegisterSetClasses[it->second];
// Name the register classes which correspond to singleton registers.
for (Record *Rec : SingletonRegisters) {
ClassInfo *CI = RegisterClasses[Rec];
assert(CI && "Missing singleton register class info!");
if (CI->ValueName.empty()) {
CI->ClassName = Rec->getName();
CI->Name = "MCK_" + Rec->getName();
CI->ValueName = Rec->getName();
} else
CI->ValueName = CI->ValueName + "," + Rec->getName();
}
}
void AsmMatcherInfo::buildOperandClasses() {
std::vector<Record*> AsmOperands =
Records.getAllDerivedDefinitions("AsmOperandClass");
// Pre-populate AsmOperandClasses map.
for (Record *Rec : AsmOperands) {
Classes.emplace_front();
AsmOperandClasses[Rec] = &Classes.front();
}
unsigned Index = 0;
for (Record *Rec : AsmOperands) {
ClassInfo *CI = AsmOperandClasses[Rec];
CI->Kind = ClassInfo::UserClass0 + Index;
ListInit *Supers = Rec->getValueAsListInit("SuperClasses");
for (Init *I : Supers->getValues()) {
DefInit *DI = dyn_cast<DefInit>(I);
if (!DI) {
PrintError(Rec->getLoc(), "Invalid super class reference!");
continue;
}
ClassInfo *SC = AsmOperandClasses[DI->getDef()];
if (!SC)
PrintError(Rec->getLoc(), "Invalid super class reference!");
else
CI->SuperClasses.push_back(SC);
}
CI->ClassName = Rec->getValueAsString("Name");
CI->Name = "MCK_" + CI->ClassName;
CI->ValueName = Rec->getName();
// Get or construct the predicate method name.
Init *PMName = Rec->getValueInit("PredicateMethod");
if (StringInit *SI = dyn_cast<StringInit>(PMName)) {
CI->PredicateMethod = SI->getValue();
} else {
assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
CI->PredicateMethod = "is" + CI->ClassName;
}
// Get or construct the render method name.
Init *RMName = Rec->getValueInit("RenderMethod");
if (StringInit *SI = dyn_cast<StringInit>(RMName)) {
CI->RenderMethod = SI->getValue();
} else {
assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
CI->RenderMethod = "add" + CI->ClassName + "Operands";
}
// Get the parse method name or leave it as empty.
Init *PRMName = Rec->getValueInit("ParserMethod");
if (StringInit *SI = dyn_cast<StringInit>(PRMName))
CI->ParserMethod = SI->getValue();
// Get the diagnostic type or leave it as empty.
Init *DiagnosticType = Rec->getValueInit("DiagnosticType");
if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
CI->DiagnosticType = SI->getValue();
++Index;
}
}
AsmMatcherInfo::AsmMatcherInfo(Record *asmParser,
CodeGenTarget &target,
RecordKeeper &records)
: Records(records), AsmParser(asmParser), Target(target) {
}
/// buildOperandMatchInfo - Build the necessary information to handle user
/// defined operand parsing methods.
void AsmMatcherInfo::buildOperandMatchInfo() {
/// Map from an operand class to a mask of the operand indices at which that
/// class can be found inside an instruction.
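/// For example (hypothetical), if operands 1 and 3 of an instruction use the
/// same custom-parsed class, that class maps to the mask 0b1010.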
typedef std::map<ClassInfo *, unsigned, less_ptr<ClassInfo>> OpClassMaskTy;
OpClassMaskTy OpClassMask;
for (const auto &MI : Matchables) {
OpClassMask.clear();
// Keep track of all operands of this instruction which belong to the
// same class.
for (unsigned i = 0, e = MI->AsmOperands.size(); i != e; ++i) {
const MatchableInfo::AsmOperand &Op = MI->AsmOperands[i];
if (Op.Class->ParserMethod.empty())
continue;
unsigned &OperandMask = OpClassMask[Op.Class];
OperandMask |= (1 << i);
}
// Generate operand match info for each mnemonic/operand class pair.
for (const auto &OCM : OpClassMask) {
unsigned OpMask = OCM.second;
ClassInfo *CI = OCM.first;
OperandMatchInfo.push_back(OperandMatchEntry::create(MI.get(), CI,
OpMask));
}
}
}
void AsmMatcherInfo::buildInfo() {
// Build information about all of the AssemblerPredicates.
std::vector<Record*> AllPredicates =
Records.getAllDerivedDefinitions("Predicate");
for (unsigned i = 0, e = AllPredicates.size(); i != e; ++i) {
Record *Pred = AllPredicates[i];
// Ignore predicates that are not intended for the assembler.
if (!Pred->getValueAsBit("AssemblerMatcherPredicate"))
continue;
if (Pred->getName().empty())
PrintFatalError(Pred->getLoc(), "Predicate has no name!");
SubtargetFeatures.insert(std::make_pair(
Pred, SubtargetFeatureInfo(Pred, SubtargetFeatures.size())));
DEBUG(SubtargetFeatures.find(Pred)->second.dump());
assert(SubtargetFeatures.size() <= 64 && "Too many subtarget features!");
}
// Parse the instructions; we need to do this first so that we can gather the
// singleton register classes.
SmallPtrSet<Record*, 16> SingletonRegisters;
unsigned VariantCount = Target.getAsmParserVariantCount();
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
std::string CommentDelimiter =
AsmVariant->getValueAsString("CommentDelimiter");
std::string RegisterPrefix = AsmVariant->getValueAsString("RegisterPrefix");
int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
for (const CodeGenInstruction *CGI : Target.instructions()) {
// If the tblgen -match-prefix option is specified (for tblgen hackers),
// filter the set of instructions we consider.
if (!StringRef(CGI->TheDef->getName()).startswith(MatchPrefix))
continue;
// Ignore "codegen only" instructions.
if (CGI->TheDef->getValueAsBit("isCodeGenOnly"))
continue;
std::unique_ptr<MatchableInfo> II(new MatchableInfo(*CGI));
II->initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
// Ignore instructions which shouldn't be matched and diagnose invalid
// instruction definitions with an error.
if (!II->validate(CommentDelimiter, true))
continue;
Matchables.push_back(std::move(II));
}
// Parse all of the InstAlias definitions and stick them in the list of
// matchables.
std::vector<Record*> AllInstAliases =
Records.getAllDerivedDefinitions("InstAlias");
for (unsigned i = 0, e = AllInstAliases.size(); i != e; ++i) {
auto Alias = llvm::make_unique<CodeGenInstAlias>(AllInstAliases[i],
AsmVariantNo, Target);
// If the tblgen -match-prefix option is specified (for tblgen hackers),
// filter the set of instruction aliases we consider, based on the target
// instruction.
if (!StringRef(Alias->ResultInst->TheDef->getName())
.startswith(MatchPrefix))
continue;
std::unique_ptr<MatchableInfo> II(new MatchableInfo(std::move(Alias)));
II->initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
// Validate the alias definitions.
II->validate(CommentDelimiter, false);
Matchables.push_back(std::move(II));
}
}
// Build info for the register classes.
buildRegisterClasses(SingletonRegisters);
// Build info for the user defined assembly operand classes.
buildOperandClasses();
// Build the information about matchables, now that we have fully formed
// classes.
std::vector<std::unique_ptr<MatchableInfo>> NewMatchables;
for (auto &II : Matchables) {
// Parse the tokens after the mnemonic.
// Note: buildInstructionOperandReference may insert new AsmOperands, so
// don't precompute the loop bound.
for (unsigned i = 0; i != II->AsmOperands.size(); ++i) {
MatchableInfo::AsmOperand &Op = II->AsmOperands[i];
StringRef Token = Op.Token;
// Check for singleton registers.
if (Record *RegRecord = II->AsmOperands[i].SingletonReg) {
Op.Class = RegisterClasses[RegRecord];
assert(Op.Class && Op.Class->Registers.size() == 1 &&
"Unexpected class for singleton register");
continue;
}
// Check for simple tokens.
if (Token[0] != '$') {
Op.Class = getTokenClass(Token);
continue;
}
if (Token.size() > 1 && isdigit(Token[1])) {
Op.Class = getTokenClass(Token);
continue;
}
// Otherwise this is an operand reference.
StringRef OperandName;
if (Token[1] == '{')
OperandName = Token.substr(2, Token.size() - 3);
else
OperandName = Token.substr(1);
if (II->DefRec.is<const CodeGenInstruction*>())
buildInstructionOperandReference(II.get(), OperandName, i);
else
buildAliasOperandReference(II.get(), OperandName, Op);
}
if (II->DefRec.is<const CodeGenInstruction*>()) {
II->buildInstructionResultOperands();
// If the instruction has a two-operand alias, build up the
// matchable here. We'll add them in bulk at the end to avoid
// confusing this loop.
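// For example (hypothetical), a three-operand "add $dst, $src1, $src2" with
// a constraint such as "$src1 = $dst" also gets a two-operand matchable of
// the form "add $dst, $src2", where the constrained source operand is
// implied by the destination.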
std::string Constraint =
II->TheDef->getValueAsString("TwoOperandAliasConstraint");
if (Constraint != "") {
// Start by making a copy of the original matchable.
std::unique_ptr<MatchableInfo> AliasII(new MatchableInfo(*II));
// Adjust it to be a two-operand alias.
AliasII->formTwoOperandAlias(Constraint);
// Add the alias to the matchables list.
NewMatchables.push_back(std::move(AliasII));
}
} else
II->buildAliasResultOperands();
}
if (!NewMatchables.empty())
Matchables.insert(Matchables.end(),
std::make_move_iterator(NewMatchables.begin()),
std::make_move_iterator(NewMatchables.end()));
// Process token alias definitions and set up the associated superclass
// information.
std::vector<Record*> AllTokenAliases =
Records.getAllDerivedDefinitions("TokenAlias");
for (unsigned i = 0, e = AllTokenAliases.size(); i != e; ++i) {
Record *Rec = AllTokenAliases[i];
ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken"));
ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken"));
if (FromClass == ToClass)
PrintFatalError(Rec->getLoc(),
"error: Destination value identical to source value.");
FromClass->SuperClasses.push_back(ToClass);
}
// Reorder classes so that classes precede super classes.
Classes.sort();
}
/// buildInstructionOperandReference - The specified operand is a reference to a
/// named operand such as $src. Resolve the Class and OperandInfo pointers.
void AsmMatcherInfo::
buildInstructionOperandReference(MatchableInfo *II,
StringRef OperandName,
unsigned AsmOpIdx) {
const CodeGenInstruction &CGI = *II->DefRec.get<const CodeGenInstruction*>();
const CGIOperandList &Operands = CGI.Operands;
MatchableInfo::AsmOperand *Op = &II->AsmOperands[AsmOpIdx];
// Map this token to an operand.
unsigned Idx;
if (!Operands.hasOperandNamed(OperandName, Idx))
PrintFatalError(II->TheDef->getLoc(),
"error: unable to find operand: '" + OperandName + "'");
// If the instruction operand has multiple suboperands, but the parser
// match class for the asm operand is still the default "ImmAsmOperand",
// then handle each suboperand separately.
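// For example, a hypothetical operand whose MI representation has two
// suboperands but whose parser match class is still "ImmAsmOperand" gets
// one AsmOperand per suboperand, each reusing the same token.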
if (Op->SubOpIdx == -1 && Operands[Idx].MINumOperands > 1) {
Record *Rec = Operands[Idx].Rec;
assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
if (MatchClass && MatchClass->getValueAsString("Name") == "Imm") {
// Insert remaining suboperands after AsmOpIdx in II->AsmOperands.
StringRef Token = Op->Token; // save this in case Op gets moved
for (unsigned SI = 1, SE = Operands[Idx].MINumOperands; SI != SE; ++SI) {
MatchableInfo::AsmOperand NewAsmOp(/*IsIsolatedToken=*/true, Token);
NewAsmOp.SubOpIdx = SI;
II->AsmOperands.insert(II->AsmOperands.begin()+AsmOpIdx+SI, NewAsmOp);
}
// Replace Op with first suboperand.
Op = &II->AsmOperands[AsmOpIdx]; // update the pointer in case it moved
Op->SubOpIdx = 0;
}
}
// Set up the operand class.
Op->Class = getOperandClass(Operands[Idx], Op->SubOpIdx);
// If the named operand is tied, canonicalize it to the untied operand.
// For example, something like:
// (outs GPR:$dst), (ins GPR:$src)
// with an asmstring of
// "inc $src"
// we want to canonicalize to:
// "inc $dst"
// so that we know how to provide the $dst operand when filling in the result.
int OITied = -1;
if (Operands[Idx].MINumOperands == 1)
OITied = Operands[Idx].getTiedRegister();
if (OITied != -1) {
// The tied operand index is an MIOperand index, find the operand that
// contains it.
std::pair<unsigned, unsigned> Idx = Operands.getSubOperandNumber(OITied);
OperandName = Operands[Idx.first].Name;
Op->SubOpIdx = Idx.second;
}
Op->SrcOpName = OperandName;
}
/// buildAliasOperandReference - When parsing an operand reference out of the
/// matching string (e.g. "movsx $src, $dst"), determine what the class of the
/// operand reference is by looking it up in the result pattern definition.
void AsmMatcherInfo::buildAliasOperandReference(MatchableInfo *II,
StringRef OperandName,
MatchableInfo::AsmOperand &Op) {
const CodeGenInstAlias &CGA = *II->DefRec.get<const CodeGenInstAlias*>();
// Set up the operand class.
for (unsigned i = 0, e = CGA.ResultOperands.size(); i != e; ++i)
if (CGA.ResultOperands[i].isRecord() &&
CGA.ResultOperands[i].getName() == OperandName) {
// It's safe to go with the first one we find, because CodeGenInstAlias
// validates that all operands with the same name have the same record.
Op.SubOpIdx = CGA.ResultInstOperandIndex[i].second;
// Use the match class from the Alias definition, not the
// destination instruction, as we may have an immediate that's
// being munged by the match class.
Op.Class = getOperandClass(CGA.ResultOperands[i].getRecord(),
Op.SubOpIdx);
Op.SrcOpName = OperandName;
return;
}
PrintFatalError(II->TheDef->getLoc(),
"error: unable to find operand: '" + OperandName + "'");
}
void MatchableInfo::buildInstructionResultOperands() {
const CodeGenInstruction *ResultInst = getResultInst();
// Loop over all operands of the result instruction, determining how to
// populate them.
for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
const CGIOperandList::OperandInfo &OpInfo = ResultInst->Operands[i];
// If this is a tied operand, just copy from the previously handled operand.
int TiedOp = -1;
if (OpInfo.MINumOperands == 1)
TiedOp = OpInfo.getTiedRegister();
if (TiedOp != -1) {
ResOperands.push_back(ResOperand::getTiedOp(TiedOp));
continue;
}
// Find out what operand from the asmparser this MCInst operand comes from.
int SrcOperand = findAsmOperandNamed(OpInfo.Name);
if (OpInfo.Name.empty() || SrcOperand == -1) {
// This may happen for operands that are tied to a suboperand of a
// complex operand. Simply use a dummy value here; nobody should
// use this operand slot.
// FIXME: The long term goal is for the MCOperand list to not contain
// tied operands at all.
ResOperands.push_back(ResOperand::getImmOp(0));
continue;
}
// Check if the one AsmOperand populates the entire operand.
unsigned NumOperands = OpInfo.MINumOperands;
if (AsmOperands[SrcOperand].SubOpIdx == -1) {
ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand, NumOperands));
continue;
}
// Add a separate ResOperand for each suboperand.
for (unsigned AI = 0; AI < NumOperands; ++AI) {
assert(AsmOperands[SrcOperand+AI].SubOpIdx == (int)AI &&
AsmOperands[SrcOperand+AI].SrcOpName == OpInfo.Name &&
"unexpected AsmOperands for suboperands");
ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand + AI, 1));
}
}
}
void MatchableInfo::buildAliasResultOperands() {
const CodeGenInstAlias &CGA = *DefRec.get<const CodeGenInstAlias*>();
const CodeGenInstruction *ResultInst = getResultInst();
// Loop over all operands of the result instruction, determining how to
// populate them.
unsigned AliasOpNo = 0;
unsigned LastOpNo = CGA.ResultInstOperandIndex.size();
for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
const CGIOperandList::OperandInfo *OpInfo = &ResultInst->Operands[i];
// If this is a tied operand, just copy from the previously handled operand.
int TiedOp = -1;
if (OpInfo->MINumOperands == 1)
TiedOp = OpInfo->getTiedRegister();
if (TiedOp != -1) {
ResOperands.push_back(ResOperand::getTiedOp(TiedOp));
continue;
}
// Handle all the suboperands for this operand.
const std::string &OpName = OpInfo->Name;
for ( ; AliasOpNo < LastOpNo &&
CGA.ResultInstOperandIndex[AliasOpNo].first == i; ++AliasOpNo) {
int SubIdx = CGA.ResultInstOperandIndex[AliasOpNo].second;
// Find out what operand from the asmparser that this MCInst operand
// comes from.
switch (CGA.ResultOperands[AliasOpNo].Kind) {
case CodeGenInstAlias::ResultOperand::K_Record: {
StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
int SrcOperand = findAsmOperand(Name, SubIdx);
if (SrcOperand == -1)
PrintFatalError(TheDef->getLoc(), "Instruction '" +
TheDef->getName() + "' has operand '" + OpName +
"' that doesn't appear in asm string!");
unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1);
ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand,
NumOperands));
break;
}
case CodeGenInstAlias::ResultOperand::K_Imm: {
int64_t ImmVal = CGA.ResultOperands[AliasOpNo].getImm();
ResOperands.push_back(ResOperand::getImmOp(ImmVal));
break;
}
case CodeGenInstAlias::ResultOperand::K_Reg: {
Record *Reg = CGA.ResultOperands[AliasOpNo].getRegister();
ResOperands.push_back(ResOperand::getRegOp(Reg));
break;
}
}
}
}
}
static unsigned getConverterOperandID(const std::string &Name,
SetVector<std::string> &Table,
bool &IsNew) {
IsNew = Table.insert(Name);
unsigned ID = IsNew ? Table.size() - 1 :
std::find(Table.begin(), Table.end(), Name) - Table.begin();
assert(ID < Table.size());
return ID;
}
static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
std::vector<std::unique_ptr<MatchableInfo>> &Infos,
raw_ostream &OS) {
SetVector<std::string> OperandConversionKinds;
SetVector<std::string> InstructionConversionKinds;
std::vector<std::vector<uint8_t> > ConversionTable;
size_t MaxRowLength = 2; // minimum is custom converter plus terminator.
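// Each conversion table row is a sequence of (operand conversion kind,
// argument) byte pairs terminated by CVT_Done; for illustration, a
// hypothetical row { CVT_Reg, 1, CVT_Tied, 0, CVT_Done } renders parsed
// operand 1 as a register and then re-adds MCInst operand 0.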
// TargetOperandClass - This is the target's operand class, like X86Operand.
std::string TargetOperandClass = Target.getName() + "Operand";
// Write the convert function to a separate stream, so we can drop it after
// the enum. We'll build up the conversion handlers for the individual
// operand types opportunistically as we encounter them.
std::string ConvertFnBody;
raw_string_ostream CvtOS(ConvertFnBody);
// Start the unified conversion function.
CvtOS << "void " << Target.getName() << ClassName << "::\n"
<< "convertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const OperandVector"
<< " &Operands) {\n"
<< " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
<< " const uint8_t *Converter = ConversionTable[Kind];\n"
<< " Inst.setOpcode(Opcode);\n"
<< " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
<< " switch (*p) {\n"
<< " default: llvm_unreachable(\"invalid conversion entry!\");\n"
<< " case CVT_Reg:\n"
<< " static_cast<" << TargetOperandClass
<< "&>(*Operands[*(p + 1)]).addRegOperands(Inst, 1);\n"
<< " break;\n"
<< " case CVT_Tied:\n"
<< " Inst.addOperand(Inst.getOperand(*(p + 1)));\n"
<< " break;\n";
std::string OperandFnBody;
raw_string_ostream OpOS(OperandFnBody);
// Start the operand number lookup function.
OpOS << "void " << Target.getName() << ClassName << "::\n"
<< "convertToMapAndConstraints(unsigned Kind,\n";
OpOS.indent(27);
OpOS << "const OperandVector &Operands) {\n"
<< " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
<< " unsigned NumMCOperands = 0;\n"
<< " const uint8_t *Converter = ConversionTable[Kind];\n"
<< " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
<< " switch (*p) {\n"
<< " default: llvm_unreachable(\"invalid conversion entry!\");\n"
<< " case CVT_Reg:\n"
<< " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
<< " Operands[*(p + 1)]->setConstraint(\"r\");\n"
<< " ++NumMCOperands;\n"
<< " break;\n"
<< " case CVT_Tied:\n"
<< " ++NumMCOperands;\n"
<< " break;\n";
// Pre-populate the operand conversion kinds with the standard always
// available entries.
OperandConversionKinds.insert("CVT_Done");
OperandConversionKinds.insert("CVT_Reg");
OperandConversionKinds.insert("CVT_Tied");
enum { CVT_Done, CVT_Reg, CVT_Tied };
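// These local enumerators correspond to the first three entries just
// inserted into OperandConversionKinds, so their values match the indices
// emitted into the conversion table for those kinds.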
for (auto &II : Infos) {
// Check if we have a custom match function.
std::string AsmMatchConverter =
II->getResultInst()->TheDef->getValueAsString("AsmMatchConverter");
if (!AsmMatchConverter.empty() && II->UseInstAsmMatchConverter) {
std::string Signature = "ConvertCustom_" + AsmMatchConverter;
II->ConversionFnKind = Signature;
// Check if we have already generated this signature.
if (!InstructionConversionKinds.insert(Signature))
continue;
// Remember this converter for the kind enum.
unsigned KindID = OperandConversionKinds.size();
OperandConversionKinds.insert("CVT_" +
getEnumNameForToken(AsmMatchConverter));
// Add the converter row for this instruction.
ConversionTable.emplace_back();
ConversionTable.back().push_back(KindID);
ConversionTable.back().push_back(CVT_Done);
// Add the handler to the conversion driver function.
CvtOS << " case CVT_"
<< getEnumNameForToken(AsmMatchConverter) << ":\n"
<< " " << AsmMatchConverter << "(Inst, Operands);\n"
<< " break;\n";
// FIXME: Handle the operand number lookup for custom match functions.
continue;
}
// Build the conversion function signature.
std::string Signature = "Convert";
std::vector<uint8_t> ConversionRow;
// Compute the convert enum and the case body.
MaxRowLength = std::max(MaxRowLength, II->ResOperands.size()*2 + 1);
for (unsigned i = 0, e = II->ResOperands.size(); i != e; ++i) {
const MatchableInfo::ResOperand &OpInfo = II->ResOperands[i];
// Generate code to populate each result operand.
switch (OpInfo.Kind) {
case MatchableInfo::ResOperand::RenderAsmOperand: {
// This comes from something we parsed.
const MatchableInfo::AsmOperand &Op =
II->AsmOperands[OpInfo.AsmOperandNum];
// Registers are always converted the same, don't duplicate the
// conversion function based on them.
Signature += "__";
std::string Class;
Class = Op.Class->isRegisterClass() ? "Reg" : Op.Class->ClassName;
Signature += Class;
Signature += utostr(OpInfo.MINumOperands);
Signature += "_" + itostr(OpInfo.AsmOperandNum);
// Add the conversion kind, if necessary, and get the associated ID
// (the index of its entry in the vector).
std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
Op.Class->RenderMethod);
Name = getEnumNameForToken(Name);
bool IsNewConverter = false;
unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
IsNewConverter);
// Add the operand entry to the instruction kind conversion row.
ConversionRow.push_back(ID);
ConversionRow.push_back(OpInfo.AsmOperandNum + 1);
if (!IsNewConverter)
break;
// This is a new operand kind. Add a handler for it to the
// converter driver.
CvtOS << " case " << Name << ":\n"
<< " static_cast<" << TargetOperandClass
<< "&>(*Operands[*(p + 1)])." << Op.Class->RenderMethod
<< "(Inst, " << OpInfo.MINumOperands << ");\n"
<< " break;\n";
// Add a handler for the operand number lookup.
OpOS << " case " << Name << ":\n"
<< " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n";
if (Op.Class->isRegisterClass())
OpOS << " Operands[*(p + 1)]->setConstraint(\"r\");\n";
else
OpOS << " Operands[*(p + 1)]->setConstraint(\"m\");\n";
OpOS << " NumMCOperands += " << OpInfo.MINumOperands << ";\n"
<< " break;\n";
break;
}
case MatchableInfo::ResOperand::TiedOperand: {
// If this operand is tied to a previous one, just copy the MCInst
// operand from the earlier one. We can only tie single MCOperand values.
assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand");
unsigned TiedOp = OpInfo.TiedOperandNum;
assert(i > TiedOp && "Tied operand precedes its target!");
Signature += "__Tie" + utostr(TiedOp);
ConversionRow.push_back(CVT_Tied);
ConversionRow.push_back(TiedOp);
break;
}
case MatchableInfo::ResOperand::ImmOperand: {
int64_t Val = OpInfo.ImmVal;
std::string Ty = "imm_" + itostr(Val);
Ty = getEnumNameForToken(Ty);
Signature += "__" + Ty;
std::string Name = "CVT_" + Ty;
bool IsNewConverter = false;
unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
IsNewConverter);
// Add the operand entry to the instruction kind conversion row.
ConversionRow.push_back(ID);
ConversionRow.push_back(0);
if (!IsNewConverter)
break;
CvtOS << " case " << Name << ":\n"
<< " Inst.addOperand(MCOperand::createImm(" << Val << "));\n"
<< " break;\n";
OpOS << " case " << Name << ":\n"
<< " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
<< " Operands[*(p + 1)]->setConstraint(\"\");\n"
<< " ++NumMCOperands;\n"
<< " break;\n";
break;
}
case MatchableInfo::ResOperand::RegOperand: {
std::string Reg, Name;
if (!OpInfo.Register) {
Name = "reg0";
Reg = "0";
} else {
Reg = getQualifiedName(OpInfo.Register);
Name = "reg" + OpInfo.Register->getName();
}
Signature += "__" + Name;
Name = "CVT_" + Name;
bool IsNewConverter = false;
unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
IsNewConverter);
// Add the operand entry to the instruction kind conversion row.
ConversionRow.push_back(ID);
ConversionRow.push_back(0);
if (!IsNewConverter)
break;
CvtOS << " case " << Name << ":\n"
<< " Inst.addOperand(MCOperand::createReg(" << Reg << "));\n"
<< " break;\n";
OpOS << " case " << Name << ":\n"
<< " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
<< " Operands[*(p + 1)]->setConstraint(\"m\");\n"
<< " ++NumMCOperands;\n"
<< " break;\n";
}
}
}
// If there were no operands, note that in the signature.
if (Signature == "Convert")
Signature += "_NoOperands";
II->ConversionFnKind = Signature;
// Save the signature. If we already have it, don't add a new row
// to the table.
if (!InstructionConversionKinds.insert(Signature))
continue;
// Add the row to the table.
ConversionTable.push_back(ConversionRow);
}
// Finish up the converter driver function.
CvtOS << " }\n }\n}\n\n";
// Finish up the operand number lookup function.
OpOS << " }\n }\n}\n\n";
OS << "namespace {\n";
// Output the operand conversion kind enum.
OS << "enum OperatorConversionKind {\n";
for (unsigned i = 0, e = OperandConversionKinds.size(); i != e; ++i)
OS << " " << OperandConversionKinds[i] << ",\n";
OS << " CVT_NUM_CONVERTERS\n";
OS << "};\n\n";
// Output the instruction conversion kind enum.
OS << "enum InstructionConversionKind {\n";
for (SetVector<std::string>::const_iterator
i = InstructionConversionKinds.begin(),
e = InstructionConversionKinds.end(); i != e; ++i)
OS << " " << *i << ",\n";
OS << " CVT_NUM_SIGNATURES\n";
OS << "};\n\n";
OS << "} // end anonymous namespace\n\n";
// Output the conversion table.
OS << "static const uint8_t ConversionTable[CVT_NUM_SIGNATURES]["
<< MaxRowLength << "] = {\n";
for (unsigned Row = 0, ERow = ConversionTable.size(); Row != ERow; ++Row) {
assert(ConversionTable[Row].size() % 2 == 0 && "bad conversion row!");
OS << " // " << InstructionConversionKinds[Row] << "\n";
OS << " { ";
for (unsigned i = 0, e = ConversionTable[Row].size(); i != e; i += 2)
OS << OperandConversionKinds[ConversionTable[Row][i]] << ", "
<< (unsigned)(ConversionTable[Row][i + 1]) << ", ";
OS << "CVT_Done },\n";
}
OS << "};\n\n";
// Spit out the conversion driver function.
OS << CvtOS.str();
// Spit out the operand number lookup function.
OS << OpOS.str();
}
/// emitMatchClassEnumeration - Emit the enumeration for match class kinds.
static void emitMatchClassEnumeration(CodeGenTarget &Target,
std::forward_list<ClassInfo> &Infos,
raw_ostream &OS) {
OS << "namespace {\n\n";
OS << "/// MatchClassKind - The kinds of classes which participate in\n"
<< "/// instruction matching.\n";
OS << "enum MatchClassKind {\n";
OS << " InvalidMatchClass = 0,\n";
for (const auto &CI : Infos) {
OS << " " << CI.Name << ", // ";
if (CI.Kind == ClassInfo::Token) {
OS << "'" << CI.ValueName << "'\n";
} else if (CI.isRegisterClass()) {
if (!CI.ValueName.empty())
OS << "register class '" << CI.ValueName << "'\n";
else
OS << "derived register class\n";
} else {
OS << "user defined class '" << CI.ValueName << "'\n";
}
}
OS << " NumMatchClassKinds\n";
OS << "};\n\n";
OS << "}\n\n";
}
/// emitValidateOperandClass - Emit the function to validate an operand class.
static void emitValidateOperandClass(AsmMatcherInfo &Info,
raw_ostream &OS) {
OS << "static unsigned validateOperandClass(MCParsedAsmOperand &GOp, "
<< "MatchClassKind Kind) {\n";
OS << " " << Info.Target.getName() << "Operand &Operand = ("
<< Info.Target.getName() << "Operand&)GOp;\n";
// InvalidMatchClass should never match any operand.
OS << " if (Kind == InvalidMatchClass)\n";
OS << " return MCTargetAsmParser::Match_InvalidOperand;\n\n";
// Check for Token operands first.
// FIXME: Use a more specific diagnostic type.
OS << " if (Operand.isToken())\n";
OS << " return isSubclass(matchTokenString(Operand.getToken()), Kind) ?\n"
<< " MCTargetAsmParser::Match_Success :\n"
<< " MCTargetAsmParser::Match_InvalidOperand;\n\n";
// Check the user classes. We don't care what order since we're only
// actually matching against one of them.
for (const auto &CI : Info.Classes) {
if (!CI.isUserClass())
continue;
OS << " // '" << CI.ClassName << "' class\n";
OS << " if (Kind == " << CI.Name << ") {\n";
OS << " if (Operand." << CI.PredicateMethod << "())\n";
OS << " return MCTargetAsmParser::Match_Success;\n";
if (!CI.DiagnosticType.empty())
OS << " return " << Info.Target.getName() << "AsmParser::Match_"
<< CI.DiagnosticType << ";\n";
OS << " }\n\n";
}
// Check for register operands, including sub-classes.
OS << " if (Operand.isReg()) {\n";
OS << " MatchClassKind OpKind;\n";
OS << " switch (Operand.getReg()) {\n";
OS << " default: OpKind = InvalidMatchClass; break;\n";
for (const auto &RC : Info.RegisterClasses)
OS << " case " << Info.Target.getName() << "::"
<< RC.first->getName() << ": OpKind = " << RC.second->Name
<< "; break;\n";
OS << " }\n";
OS << " return isSubclass(OpKind, Kind) ? "
<< "MCTargetAsmParser::Match_Success :\n "
<< " MCTargetAsmParser::Match_InvalidOperand;\n }\n\n";
// Generic fallthrough match failure case for operands that don't have
// specialized diagnostic types.
OS << " return MCTargetAsmParser::Match_InvalidOperand;\n";
OS << "}\n\n";
}
/// emitIsSubclass - Emit the subclass predicate function.
static void emitIsSubclass(CodeGenTarget &Target,
std::forward_list<ClassInfo> &Infos,
raw_ostream &OS) {
OS << "/// isSubclass - Compute whether \\p A is a subclass of \\p B.\n";
OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n";
OS << " if (A == B)\n";
OS << " return true;\n\n";
std::string OStr;
raw_string_ostream SS(OStr);
unsigned Count = 0;
SS << " switch (A) {\n";
SS << " default:\n";
SS << " return false;\n";
for (const auto &A : Infos) {
std::vector<StringRef> SuperClasses;
for (const auto &B : Infos) {
if (&A != &B && A.isSubsetOf(B))
SuperClasses.push_back(B.Name);
}
if (SuperClasses.empty())
continue;
++Count;
SS << "\n case " << A.Name << ":\n";
if (SuperClasses.size() == 1) {
SS << " return B == " << SuperClasses.back().str() << ";\n";
continue;
}
// More than one superclass; SuperClasses cannot be empty here because the
// empty case already hit the 'continue' above, so emit a switch over B.
SS << " switch (B) {\n";
SS << " default: return false;\n";
for (unsigned i = 0, e = SuperClasses.size(); i != e; ++i)
SS << " case " << SuperClasses[i].str() << ": return true;\n";
SS << " }\n";
}
SS << " }\n";
// If there were case statements emitted into the string stream, write them
// to the output stream, otherwise write the default.
if (Count)
OS << SS.str();
else
OS << " return false;\n";
OS << "}\n\n";
}
/// emitMatchTokenString - Emit the function to match a token string to the
/// appropriate match class value.
static void emitMatchTokenString(CodeGenTarget &Target,
std::forward_list<ClassInfo> &Infos,
raw_ostream &OS) {
// Construct the match list.
std::vector<StringMatcher::StringPair> Matches;
for (const auto &CI : Infos) {
if (CI.Kind == ClassInfo::Token)
Matches.emplace_back(CI.ValueName, "return " + CI.Name + ";");
}
OS << "static MatchClassKind matchTokenString(StringRef Name) {\n";
StringMatcher("Name", Matches, OS).Emit();
OS << " return InvalidMatchClass;\n";
OS << "}\n\n";
}
/// emitMatchRegisterName - Emit the function to match a string to the target
/// specific register enum.
static void emitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
raw_ostream &OS) {
// Construct the match list.
std::vector<StringMatcher::StringPair> Matches;
const auto &Regs = Target.getRegBank().getRegisters();
for (const CodeGenRegister &Reg : Regs) {
if (Reg.TheDef->getValueAsString("AsmName").empty())
continue;
Matches.emplace_back(Reg.TheDef->getValueAsString("AsmName"),
"return " + utostr(Reg.EnumValue) + ";");
}
OS << "static unsigned MatchRegisterName(StringRef Name) {\n";
StringMatcher("Name", Matches, OS).Emit();
OS << " return 0;\n";
OS << "}\n\n";
}
static const char *getMinimalTypeForRange(uint64_t Range) {
assert(Range <= 0xFFFFFFFFFFFFFFFFULL && "Enum too large");
if (Range > 0xFFFFFFFFULL)
return "uint64_t";
if (Range > 0xFFFF)
return "uint32_t";
if (Range > 0xFF)
return "uint16_t";
return "uint8_t";
}
static const char *getMinimalRequiredFeaturesType(const AsmMatcherInfo &Info) {
uint64_t MaxIndex = Info.SubtargetFeatures.size();
if (MaxIndex > 0)
MaxIndex--;
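// The widest flag needed is (1ULL << (NumFeatures - 1)); e.g. nine features
// need values up to 1ULL << 8, which getMinimalTypeForRange maps to
// uint16_t.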
return getMinimalTypeForRange(1ULL << MaxIndex);
}
/// emitSubtargetFeatureFlagEnumeration - Emit the subtarget feature flag
/// definitions.
static void emitSubtargetFeatureFlagEnumeration(AsmMatcherInfo &Info,
raw_ostream &OS) {
OS << "// Flags for subtarget features that participate in "
<< "instruction matching.\n";
OS << "enum SubtargetFeatureFlag : " << getMinimalRequiredFeaturesType(Info)
<< " {\n";
for (const auto &SF : Info.SubtargetFeatures) {
const SubtargetFeatureInfo &SFI = SF.second;
OS << " " << SFI.getEnumName() << " = (1ULL << " << SFI.Index << "),\n";
}
OS << " Feature_None = 0\n";
OS << "};\n\n";
}
/// emitOperandDiagnosticTypes - Emit the operand matching diagnostic types.
static void emitOperandDiagnosticTypes(AsmMatcherInfo &Info, raw_ostream &OS) {
// Get the set of diagnostic types from all of the operand classes.
std::set<StringRef> Types;
for (std::map<Record*, ClassInfo*>::const_iterator
I = Info.AsmOperandClasses.begin(),
E = Info.AsmOperandClasses.end(); I != E; ++I) {
if (!I->second->DiagnosticType.empty())
Types.insert(I->second->DiagnosticType);
}
if (Types.empty()) return;
// Now emit the enum entries.
for (std::set<StringRef>::const_iterator I = Types.begin(), E = Types.end();
I != E; ++I)
OS << " Match_" << *I << ",\n";
OS << " END_OPERAND_DIAGNOSTIC_TYPES\n";
}
/// emitGetSubtargetFeatureName - Emit the helper function to get the
/// user-level name for a subtarget feature.
static void emitGetSubtargetFeatureName(AsmMatcherInfo &Info, raw_ostream &OS) {
OS << "// User-level names for subtarget features that participate in\n"
<< "// instruction matching.\n"
<< "static const char *getSubtargetFeatureName(uint64_t Val) {\n";
if (!Info.SubtargetFeatures.empty()) {
OS << " switch(Val) {\n";
for (const auto &SF : Info.SubtargetFeatures) {
const SubtargetFeatureInfo &SFI = SF.second;
// FIXME: Totally just a placeholder name to get the algorithm working.
OS << " case " << SFI.getEnumName() << ": return \""
<< SFI.TheDef->getValueAsString("PredicateName") << "\";\n";
}
OS << " default: return \"(unknown)\";\n";
OS << " }\n";
} else {
// Nothing to emit, so skip the switch
OS << " return \"(unknown)\";\n";
}
OS << "}\n\n";
}
/// emitComputeAvailableFeatures - Emit the function to compute the list of
/// available features given a subtarget.
static void emitComputeAvailableFeatures(AsmMatcherInfo &Info,
raw_ostream &OS) {
std::string ClassName =
Info.AsmParser->getValueAsString("AsmParserClassName");
OS << "uint64_t " << Info.Target.getName() << ClassName << "::\n"
<< "ComputeAvailableFeatures(const FeatureBitset& FB) const {\n";
OS << " uint64_t Features = 0;\n";
for (const auto &SF : Info.SubtargetFeatures) {
const SubtargetFeatureInfo &SFI = SF.second;
OS << " if (";
std::string CondStorage =
SFI.TheDef->getValueAsString("AssemblerCondString");
StringRef Conds = CondStorage;
std::pair<StringRef,StringRef> Comma = Conds.split(',');
bool First = true;
do {
if (!First)
OS << " && ";
bool Neg = false;
StringRef Cond = Comma.first;
if (Cond[0] == '!') {
Neg = true;
Cond = Cond.substr(1);
}
OS << "(";
if (Neg)
OS << "!";
OS << "FB[" << Info.Target.getName() << "::" << Cond << "])";
if (Comma.second.empty())
break;
First = false;
Comma = Comma.second.split(',');
} while (true);
OS << ")\n";
OS << " Features |= " << SFI.getEnumName() << ";\n";
}
OS << " return Features;\n";
OS << "}\n\n";
}
static std::string GetAliasRequiredFeatures(Record *R,
const AsmMatcherInfo &Info) {
std::vector<Record*> ReqFeatures = R->getValueAsListOfDefs("Predicates");
std::string Result;
unsigned NumFeatures = 0;
for (unsigned i = 0, e = ReqFeatures.size(); i != e; ++i) {
const SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]);
if (!F)
PrintFatalError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
"' is not marked as an AssemblerPredicate!");
if (NumFeatures)
Result += '|';
Result += F->getEnumName();
++NumFeatures;
}
if (NumFeatures > 1)
Result = '(' + Result + ')';
return Result;
}
static void emitMnemonicAliasVariant(raw_ostream &OS, const AsmMatcherInfo &Info,
std::vector<Record*> &Aliases,
unsigned Indent = 0,
StringRef AsmParserVariantName = StringRef()) {
// Keep track of all the aliases from a mnemonic. Use an std::map so that the
// iteration order of the map is stable.
std::map<std::string, std::vector<Record*> > AliasesFromMnemonic;
for (unsigned i = 0, e = Aliases.size(); i != e; ++i) {
Record *R = Aliases[i];
// FIXME: Allow AssemblerVariantName to be a comma separated list.
std::string AsmVariantName = R->getValueAsString("AsmVariantName");
if (AsmVariantName != AsmParserVariantName)
continue;
AliasesFromMnemonic[R->getValueAsString("FromMnemonic")].push_back(R);
}
if (AliasesFromMnemonic.empty())
return;
// Process each alias a "from" mnemonic at a time, building the code executed
// by the string remapper.
std::vector<StringMatcher::StringPair> Cases;
for (std::map<std::string, std::vector<Record*> >::iterator
I = AliasesFromMnemonic.begin(), E = AliasesFromMnemonic.end();
I != E; ++I) {
const std::vector<Record*> &ToVec = I->second;
// Loop through each alias and emit code that handles each case. If there
// are two instructions without predicates, emit an error. If there is one,
// emit it last.
std::string MatchCode;
int AliasWithNoPredicate = -1;
for (unsigned i = 0, e = ToVec.size(); i != e; ++i) {
Record *R = ToVec[i];
std::string FeatureMask = GetAliasRequiredFeatures(R, Info);
// If this unconditionally matches, remember it for later and diagnose
// duplicates.
if (FeatureMask.empty()) {
if (AliasWithNoPredicate != -1) {
// We can't have two aliases from the same mnemonic with no predicate.
PrintError(ToVec[AliasWithNoPredicate]->getLoc(),
"two MnemonicAliases with the same 'from' mnemonic!");
PrintFatalError(R->getLoc(), "this is the other MnemonicAlias.");
}
AliasWithNoPredicate = i;
continue;
}
if (R->getValueAsString("ToMnemonic") == I->first)
PrintFatalError(R->getLoc(), "MnemonicAlias to the same string");
if (!MatchCode.empty())
MatchCode += "else ";
MatchCode += "if ((Features & " + FeatureMask + ") == "+FeatureMask+")\n";
MatchCode += " Mnemonic = \"" +R->getValueAsString("ToMnemonic")+"\";\n";
}
if (AliasWithNoPredicate != -1) {
Record *R = ToVec[AliasWithNoPredicate];
if (!MatchCode.empty())
MatchCode += "else\n ";
MatchCode += "Mnemonic = \"" + R->getValueAsString("ToMnemonic")+"\";\n";
}
MatchCode += "return;";
Cases.push_back(std::make_pair(I->first, MatchCode));
}
StringMatcher("Mnemonic", Cases, OS).Emit(Indent);
}
/// emitMnemonicAliases - If the target has any MnemonicAlias<> definitions,
/// emit a function for them and return true, otherwise return false.
static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info,
CodeGenTarget &Target) {
// Ignore aliases when match-prefix is set.
if (!MatchPrefix.empty())
return false;
std::vector<Record*> Aliases =
Info.getRecords().getAllDerivedDefinitions("MnemonicAlias");
if (Aliases.empty()) return false;
OS << "static void applyMnemonicAliases(StringRef &Mnemonic, "
"uint64_t Features, unsigned VariantID) {\n";
OS << " switch (VariantID) {\n";
unsigned VariantCount = Target.getAsmParserVariantCount();
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
int AsmParserVariantNo = AsmVariant->getValueAsInt("Variant");
std::string AsmParserVariantName = AsmVariant->getValueAsString("Name");
OS << " case " << AsmParserVariantNo << ":\n";
emitMnemonicAliasVariant(OS, Info, Aliases, /*Indent=*/2,
AsmParserVariantName);
OS << " break;\n";
}
OS << " }\n";
// Emit aliases that apply to all variants.
emitMnemonicAliasVariant(OS, Info, Aliases);
OS << "}\n\n";
return true;
}
static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
const AsmMatcherInfo &Info, StringRef ClassName,
StringToOffsetTable &StringTable,
unsigned MaxMnemonicIndex) {
unsigned MaxMask = 0;
for (std::vector<OperandMatchEntry>::const_iterator it =
Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
it != ie; ++it) {
MaxMask |= it->OperandMask;
}
// Emit the static custom operand parsing table.
OS << "namespace {\n";
OS << " struct OperandMatchEntry {\n";
OS << " " << getMinimalRequiredFeaturesType(Info)
<< " RequiredFeatures;\n";
OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
<< " Mnemonic;\n";
OS << " " << getMinimalTypeForRange(std::distance(
Info.Classes.begin(), Info.Classes.end())) << " Class;\n";
OS << " " << getMinimalTypeForRange(MaxMask)
<< " OperandMask;\n\n";
OS << " StringRef getMnemonic() const {\n";
OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
OS << " MnemonicTable[Mnemonic]);\n";
OS << " }\n";
OS << " };\n\n";
OS << " // Predicate for searching for an opcode.\n";
OS << " struct LessOpcodeOperand {\n";
OS << " bool operator()(const OperandMatchEntry &LHS, StringRef RHS) {\n";
OS << " return LHS.getMnemonic() < RHS;\n";
OS << " }\n";
OS << " bool operator()(StringRef LHS, const OperandMatchEntry &RHS) {\n";
OS << " return LHS < RHS.getMnemonic();\n";
OS << " }\n";
OS << " bool operator()(const OperandMatchEntry &LHS,";
OS << " const OperandMatchEntry &RHS) {\n";
OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
OS << " }\n";
OS << " };\n";
OS << "} // end anonymous namespace.\n\n";
OS << "static const OperandMatchEntry OperandMatchTable["
<< Info.OperandMatchInfo.size() << "] = {\n";
OS << " /* Operand List Mask, Mnemonic, Operand Class, Features */\n";
for (std::vector<OperandMatchEntry>::const_iterator it =
Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
it != ie; ++it) {
const OperandMatchEntry &OMI = *it;
const MatchableInfo &II = *OMI.MI;
OS << " { ";
// Write the required features mask.
if (!II.RequiredFeatures.empty()) {
for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
if (i) OS << "|";
OS << II.RequiredFeatures[i]->getEnumName();
}
} else
OS << "0";
// Store a pascal-style length byte in the mnemonic.
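// For illustration, a hypothetical mnemonic "add" is stored as "\x03add",
// letting getMnemonic() rebuild a StringRef from the length byte plus the
// text that follows it.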
std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
<< " /* " << II.Mnemonic << " */, ";
OS << OMI.CI->Name;
OS << ", " << OMI.OperandMask;
OS << " /* ";
bool printComma = false;
for (int i = 0, e = 31; i != e; ++i)
if (OMI.OperandMask & (1 << i)) {
if (printComma)
OS << ", ";
OS << i;
printComma = true;
}
OS << " */";
OS << " },\n";
}
OS << "};\n\n";
// Emit the operand class switch to call the correct custom parser for
// the found operand class.
OS << Target.getName() << ClassName << "::OperandMatchResultTy "
<< Target.getName() << ClassName << "::\n"
<< "tryCustomParseOperand(OperandVector"
<< " &Operands,\n unsigned MCK) {\n\n"
<< " switch(MCK) {\n";
for (const auto &CI : Info.Classes) {
if (CI.ParserMethod.empty())
continue;
OS << " case " << CI.Name << ":\n"
<< " return " << CI.ParserMethod << "(Operands);\n";
}
OS << " default:\n";
OS << " return MatchOperand_NoMatch;\n";
OS << " }\n";
OS << " return MatchOperand_NoMatch;\n";
OS << "}\n\n";
// Emit the static custom operand parser. This code is very similar to
// the other matcher. Also use MatchResultTy here in case we later want
// better error handling.
OS << Target.getName() << ClassName << "::OperandMatchResultTy "
<< Target.getName() << ClassName << "::\n"
<< "MatchOperandParserImpl(OperandVector"
<< " &Operands,\n StringRef Mnemonic) {\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
OS << " uint64_t AvailableFeatures = getAvailableFeatures();\n\n";
OS << " // Get the next operand index.\n";
OS << " unsigned NextOpNum = Operands.size()-1;\n";
// Emit code to search the table.
OS << " // Search the table.\n";
OS << " std::pair<const OperandMatchEntry*, const OperandMatchEntry*>";
OS << " MnemonicRange =\n";
OS << " std::equal_range(OperandMatchTable, OperandMatchTable+"
<< Info.OperandMatchInfo.size() << ", Mnemonic,\n"
<< " LessOpcodeOperand());\n\n";
OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
OS << " return MatchOperand_NoMatch;\n\n";
OS << " for (const OperandMatchEntry *it = MnemonicRange.first,\n"
<< " *ie = MnemonicRange.second; it != ie; ++it) {\n";
OS << " // equal_range guarantees that instruction mnemonic matches.\n";
OS << " assert(Mnemonic == it->getMnemonic());\n\n";
// Emit check that the required features are available.
OS << " // check if the available features match\n";
OS << " if ((AvailableFeatures & it->RequiredFeatures) "
<< "!= it->RequiredFeatures) {\n";
OS << " continue;\n";
OS << " }\n\n";
// Emit check to ensure the operand number matches.
OS << " // check if the operand in question has a custom parser.\n";
OS << " if (!(it->OperandMask & (1 << NextOpNum)))\n";
OS << " continue;\n\n";
// Emit call to the custom parser method
OS << " // call custom parse method to handle the operand\n";
OS << " OperandMatchResultTy Result = ";
OS << "tryCustomParseOperand(Operands, it->Class);\n";
OS << " if (Result != MatchOperand_NoMatch)\n";
OS << " return Result;\n";
OS << " }\n\n";
OS << " // Okay, we had no match.\n";
OS << " return MatchOperand_NoMatch;\n";
OS << "}\n\n";
}
void AsmMatcherEmitter::run(raw_ostream &OS) {
CodeGenTarget Target(Records);
Record *AsmParser = Target.getAsmParser();
std::string ClassName = AsmParser->getValueAsString("AsmParserClassName");
// Compute the information on the instructions to match.
AsmMatcherInfo Info(AsmParser, Target, Records);
Info.buildInfo();
// Sort the instruction table using the partial order on classes. We use
// stable_sort to ensure that ambiguous instructions are still
// deterministically ordered.
std::stable_sort(Info.Matchables.begin(), Info.Matchables.end(),
[](const std::unique_ptr<MatchableInfo> &a,
const std::unique_ptr<MatchableInfo> &b){
return *a < *b;});
DEBUG_WITH_TYPE("instruction_info", {
for (const auto &MI : Info.Matchables)
MI->dump();
});
// Check for ambiguous matchables.
DEBUG_WITH_TYPE("ambiguous_instrs", {
unsigned NumAmbiguous = 0;
for (auto I = Info.Matchables.begin(), E = Info.Matchables.end(); I != E;
++I) {
for (auto J = std::next(I); J != E; ++J) {
const MatchableInfo &A = **I;
const MatchableInfo &B = **J;
if (A.couldMatchAmbiguouslyWith(B)) {
errs() << "warning: ambiguous matchables:\n";
A.dump();
errs() << "\nis incomparable with:\n";
B.dump();
errs() << "\n\n";
++NumAmbiguous;
}
}
}
if (NumAmbiguous)
errs() << "warning: " << NumAmbiguous
<< " ambiguous matchables!\n";
});
// Compute the information on the custom operand parsing.
Info.buildOperandMatchInfo();
// Write the output.
// Information for the class declaration.
OS << "\n#ifdef GET_ASSEMBLER_HEADER\n";
OS << "#undef GET_ASSEMBLER_HEADER\n";
OS << " // This should be included into the middle of the declaration of\n";
OS << " // your subclasses implementation of MCTargetAsmParser.\n";
OS << " uint64_t ComputeAvailableFeatures(const FeatureBitset& FB) const;\n";
OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const OperandVector "
<< "&Operands);\n";
OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
OS << " const OperandVector &Operands) override;\n";
OS << " bool mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) override;\n";
OS << " unsigned MatchInstructionImpl(const OperandVector &Operands,\n"
<< " MCInst &Inst,\n"
<< " uint64_t &ErrorInfo,"
<< " bool matchingInlineAsm,\n"
<< " unsigned VariantID = 0);\n";
if (!Info.OperandMatchInfo.empty()) {
OS << "\n enum OperandMatchResultTy {\n";
OS << " MatchOperand_Success, // operand matched successfully\n";
OS << " MatchOperand_NoMatch, // operand did not match\n";
OS << " MatchOperand_ParseFail // operand matched but had errors\n";
OS << " };\n";
OS << " OperandMatchResultTy MatchOperandParserImpl(\n";
OS << " OperandVector &Operands,\n";
OS << " StringRef Mnemonic);\n";
OS << " OperandMatchResultTy tryCustomParseOperand(\n";
OS << " OperandVector &Operands,\n";
OS << " unsigned MCK);\n\n";
}
OS << "#endif // GET_ASSEMBLER_HEADER_INFO\n\n";
// Emit the operand match diagnostic enum names.
OS << "\n#ifdef GET_OPERAND_DIAGNOSTIC_TYPES\n";
OS << "#undef GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
emitOperandDiagnosticTypes(Info, OS);
OS << "#endif // GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
OS << "\n#ifdef GET_REGISTER_MATCHER\n";
OS << "#undef GET_REGISTER_MATCHER\n\n";
// Emit the subtarget feature enumeration.
emitSubtargetFeatureFlagEnumeration(Info, OS);
// Emit the function to match a register name to number.
// This should be omitted for Mips target
if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterName"))
emitMatchRegisterName(Target, AsmParser, OS);
OS << "#endif // GET_REGISTER_MATCHER\n\n";
OS << "\n#ifdef GET_SUBTARGET_FEATURE_NAME\n";
OS << "#undef GET_SUBTARGET_FEATURE_NAME\n\n";
// Generate the helper function to get the names for subtarget features.
emitGetSubtargetFeatureName(Info, OS);
OS << "#endif // GET_SUBTARGET_FEATURE_NAME\n\n";
OS << "\n#ifdef GET_MATCHER_IMPLEMENTATION\n";
OS << "#undef GET_MATCHER_IMPLEMENTATION\n\n";
// Generate the function that remaps for mnemonic aliases.
bool HasMnemonicAliases = emitMnemonicAliases(OS, Info, Target);
// Generate the convertToMCInst function to convert operands into an MCInst.
// Also, generate the convertToMapAndConstraints function for MS-style inline
// assembly. The latter doesn't actually generate a MCInst.
emitConvertFuncs(Target, ClassName, Info.Matchables, OS);
// Emit the enumeration for classes which participate in matching.
emitMatchClassEnumeration(Target, Info.Classes, OS);
// Emit the routine to match token strings to their match class.
emitMatchTokenString(Target, Info.Classes, OS);
// Emit the subclass predicate routine.
emitIsSubclass(Target, Info.Classes, OS);
// Emit the routine to validate an operand against a match class.
emitValidateOperandClass(Info, OS);
// Emit the available features compute function.
emitComputeAvailableFeatures(Info, OS);
StringToOffsetTable StringTable;
size_t MaxNumOperands = 0;
unsigned MaxMnemonicIndex = 0;
bool HasDeprecation = false;
for (const auto &MI : Info.Matchables) {
MaxNumOperands = std::max(MaxNumOperands, MI->AsmOperands.size());
HasDeprecation |= MI->HasDeprecation;
// Store a pascal-style length byte in the mnemonic.
std::string LenMnemonic = char(MI->Mnemonic.size()) + MI->Mnemonic.str();
MaxMnemonicIndex = std::max(MaxMnemonicIndex,
StringTable.GetOrAddStringOffset(LenMnemonic, false));
}
OS << "static const char *const MnemonicTable =\n";
StringTable.EmitString(OS);
OS << ";\n\n";
// Emit the static match table; unused classes get initialized to 0 which is
// guaranteed to be InvalidMatchClass.
//
// FIXME: We can reduce the size of this table very easily. First, we can
// store the kinds in separate bit-fields for each index, each of which only
// needs to be the max width used for classes at that index (we also need
// to reject based on this during classification). If we then make sure to
// order the match kinds appropriately (putting mnemonics last), then we
// should only end up using a few bits for each class, especially the ones
// following the mnemonic.
OS << "namespace {\n";
OS << " struct MatchEntry {\n";
OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
<< " Mnemonic;\n";
OS << " uint16_t Opcode;\n";
OS << " " << getMinimalTypeForRange(Info.Matchables.size())
<< " ConvertFn;\n";
OS << " " << getMinimalRequiredFeaturesType(Info)
<< " RequiredFeatures;\n";
OS << " " << getMinimalTypeForRange(
std::distance(Info.Classes.begin(), Info.Classes.end()))
<< " Classes[" << MaxNumOperands << "];\n";
OS << " StringRef getMnemonic() const {\n";
OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
OS << " MnemonicTable[Mnemonic]);\n";
OS << " }\n";
OS << " };\n\n";
OS << " // Predicate for searching for an opcode.\n";
OS << " struct LessOpcode {\n";
OS << " bool operator()(const MatchEntry &LHS, StringRef RHS) {\n";
OS << " return LHS.getMnemonic() < RHS;\n";
OS << " }\n";
OS << " bool operator()(StringRef LHS, const MatchEntry &RHS) {\n";
OS << " return LHS < RHS.getMnemonic();\n";
OS << " }\n";
OS << " bool operator()(const MatchEntry &LHS, const MatchEntry &RHS) {\n";
OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
OS << " }\n";
OS << " };\n";
OS << "} // end anonymous namespace.\n\n";
unsigned VariantCount = Target.getAsmParserVariantCount();
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
OS << "static const MatchEntry MatchTable" << VC << "[] = {\n";
for (const auto &MI : Info.Matchables) {
if (MI->AsmVariantID != AsmVariantNo)
continue;
// Store a pascal-style length byte in the mnemonic.
std::string LenMnemonic = char(MI->Mnemonic.size()) + MI->Mnemonic.str();
OS << " { " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
<< " /* " << MI->Mnemonic << " */, "
<< Target.getName() << "::"
<< MI->getResultInst()->TheDef->getName() << ", "
<< MI->ConversionFnKind << ", ";
// Write the required features mask.
if (!MI->RequiredFeatures.empty()) {
for (unsigned i = 0, e = MI->RequiredFeatures.size(); i != e; ++i) {
if (i) OS << "|";
OS << MI->RequiredFeatures[i]->getEnumName();
}
} else
OS << "0";
OS << ", { ";
for (unsigned i = 0, e = MI->AsmOperands.size(); i != e; ++i) {
const MatchableInfo::AsmOperand &Op = MI->AsmOperands[i];
if (i) OS << ", ";
OS << Op.Class->Name;
}
OS << " }, },\n";
}
OS << "};\n\n";
}
// A method to determine if a mnemonic is in the list.
OS << "bool " << Target.getName() << ClassName << "::\n"
<< "mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) {\n";
OS << " // Find the appropriate table for this asm variant.\n";
OS << " const MatchEntry *Start, *End;\n";
OS << " switch (VariantID) {\n";
OS << " default: llvm_unreachable(\"invalid variant!\");\n";
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
<< "); End = std::end(MatchTable" << VC << "); break;\n";
}
OS << " }\n";
OS << " // Search the table.\n";
OS << " std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
OS << " std::equal_range(Start, End, Mnemonic, LessOpcode());\n";
OS << " return MnemonicRange.first != MnemonicRange.second;\n";
OS << "}\n\n";
// Finally, build the match function.
OS << "unsigned " << Target.getName() << ClassName << "::\n"
<< "MatchInstructionImpl(const OperandVector &Operands,\n";
OS << " MCInst &Inst, uint64_t &ErrorInfo,\n"
<< " bool matchingInlineAsm, unsigned VariantID) {\n";
OS << " // Eliminate obvious mismatches.\n";
OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n";
OS << " return Match_InvalidOperand;\n";
OS << " }\n\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
OS << " uint64_t AvailableFeatures = getAvailableFeatures();\n\n";
OS << " // Get the instruction mnemonic, which is the first token.\n";
OS << " StringRef Mnemonic = ((" << Target.getName()
<< "Operand&)*Operands[0]).getToken();\n\n";
if (HasMnemonicAliases) {
OS << " // Process all MnemonicAliases to remap the mnemonic.\n";
OS << " applyMnemonicAliases(Mnemonic, AvailableFeatures, VariantID);\n\n";
}
// Emit code to compute the class list for this operand vector.
OS << " // Some state to try to produce better error messages.\n";
OS << " bool HadMatchOtherThanFeatures = false;\n";
OS << " bool HadMatchOtherThanPredicate = false;\n";
OS << " unsigned RetCode = Match_InvalidOperand;\n";
OS << " uint64_t MissingFeatures = ~0ULL;\n";
OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
OS << " // wrong for all instances of the instruction.\n";
OS << " ErrorInfo = ~0ULL;\n";
// Emit code to search the table.
OS << " // Find the appropriate table for this asm variant.\n";
OS << " const MatchEntry *Start, *End;\n";
OS << " switch (VariantID) {\n";
OS << " default: llvm_unreachable(\"invalid variant!\");\n";
for (unsigned VC = 0; VC != VariantCount; ++VC) {
Record *AsmVariant = Target.getAsmParserVariant(VC);
int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
<< "); End = std::end(MatchTable" << VC << "); break;\n";
}
OS << " }\n";
OS << " // Search the table.\n";
OS << " std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
OS << " std::equal_range(Start, End, Mnemonic, LessOpcode());\n\n";
OS << " // Return a more specific error code if no mnemonics match.\n";
OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
OS << " return Match_MnemonicFail;\n\n";
OS << " for (const MatchEntry *it = MnemonicRange.first, "
<< "*ie = MnemonicRange.second;\n";
OS << " it != ie; ++it) {\n";
OS << " // equal_range guarantees that instruction mnemonic matches.\n";
OS << " assert(Mnemonic == it->getMnemonic());\n";
// Emit check that the subclasses match.
OS << " bool OperandsValid = true;\n";
OS << " for (unsigned i = 0; i != " << MaxNumOperands << "; ++i) {\n";
OS << " if (i + 1 >= Operands.size()) {\n";
OS << " OperandsValid = (it->Classes[i] == " <<"InvalidMatchClass);\n";
OS << " if (!OperandsValid) ErrorInfo = i + 1;\n";
OS << " break;\n";
OS << " }\n";
OS << " unsigned Diag = validateOperandClass(*Operands[i+1],\n";
OS.indent(43);
OS << "(MatchClassKind)it->Classes[i]);\n";
OS << " if (Diag == Match_Success)\n";
OS << " continue;\n";
OS << " // If the generic handler indicates an invalid operand\n";
OS << " // failure, check for a special case.\n";
OS << " if (Diag == Match_InvalidOperand) {\n";
OS << " Diag = validateTargetOperandClass(*Operands[i+1],\n";
OS.indent(43);
OS << "(MatchClassKind)it->Classes[i]);\n";
OS << " if (Diag == Match_Success)\n";
OS << " continue;\n";
OS << " }\n";
OS << " // If this operand is broken for all of the instances of this\n";
OS << " // mnemonic, keep track of it so we can report loc info.\n";
OS << " // If we already had a match that only failed due to a\n";
OS << " // target predicate, that diagnostic is preferred.\n";
OS << " if (!HadMatchOtherThanPredicate &&\n";
OS << " (it == MnemonicRange.first || ErrorInfo <= i+1)) {\n";
OS << " ErrorInfo = i+1;\n";
OS << " // InvalidOperand is the default. Prefer specificity.\n";
OS << " if (Diag != Match_InvalidOperand)\n";
OS << " RetCode = Diag;\n";
OS << " }\n";
OS << " // Otherwise, just reject this instance of the mnemonic.\n";
OS << " OperandsValid = false;\n";
OS << " break;\n";
OS << " }\n\n";
OS << " if (!OperandsValid) continue;\n";
// Emit check that the required features are available.
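// The generated loop below remembers the candidate with the fewest missing
// features (compared via countPopulation) so that Match_MissingFeature can
// report the closest match.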
OS << " if ((AvailableFeatures & it->RequiredFeatures) "
<< "!= it->RequiredFeatures) {\n";
OS << " HadMatchOtherThanFeatures = true;\n";
OS << " uint64_t NewMissingFeatures = it->RequiredFeatures & "
"~AvailableFeatures;\n";
OS << " if (countPopulation(NewMissingFeatures) <=\n"
" countPopulation(MissingFeatures))\n";
OS << " MissingFeatures = NewMissingFeatures;\n";
OS << " continue;\n";
OS << " }\n";
OS << "\n";
OS << " Inst.clear();\n\n";
OS << " if (matchingInlineAsm) {\n";
OS << " Inst.setOpcode(it->Opcode);\n";
OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n";
OS << " return Match_Success;\n";
OS << " }\n\n";
OS << " // We have selected a definite instruction, convert the parsed\n"
<< " // operands into the appropriate MCInst.\n";
OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
OS << "\n";
// Verify the instruction with the target-specific match predicate function.
OS << " // We have a potential match. Check the target predicate to\n"
<< " // handle any context sensitive constraints.\n"
<< " unsigned MatchResult;\n"
<< " if ((MatchResult = checkTargetMatchPredicate(Inst)) !="
<< " Match_Success) {\n"
<< " Inst.clear();\n"
<< " RetCode = MatchResult;\n"
<< " HadMatchOtherThanPredicate = true;\n"
<< " continue;\n"
<< " }\n\n";
// Call the post-processing function, if used.
std::string InsnCleanupFn =
AsmParser->getValueAsString("AsmParserInstCleanup");
if (!InsnCleanupFn.empty())
OS << " " << InsnCleanupFn << "(Inst);\n";
if (HasDeprecation) {
OS << " std::string Info;\n";
OS << " if (MII.get(Inst.getOpcode()).getDeprecatedInfo(Inst, STI, Info)) {\n";
OS << " SMLoc Loc = ((" << Target.getName()
<< "Operand&)*Operands[0]).getStartLoc();\n";
OS << " getParser().Warning(Loc, Info, None);\n";
OS << " }\n";
}
OS << " return Match_Success;\n";
OS << " }\n\n";
OS << " // Okay, we had no match. Try to return a useful error code.\n";
OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)\n";
OS << " return RetCode;\n\n";
OS << " // Missing feature matches return which features were missing\n";
OS << " ErrorInfo = MissingFeatures;\n";
OS << " return Match_MissingFeature;\n";
OS << "}\n\n";
if (!Info.OperandMatchInfo.empty())
emitCustomOperandParsing(OS, Target, Info, ClassName, StringTable,
MaxMnemonicIndex);
OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
}
namespace llvm {
void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS) {
emitSourceFileHeader("Assembly Matcher Source Fragment", OS);
AsmMatcherEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenRegisters.cpp | //===- CodeGenRegisters.cpp - Register and RegisterClass Info -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate information gleaned from the
// target register and register class definitions.
//
//===----------------------------------------------------------------------===//
#include "CodeGenRegisters.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Error.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc-emitter"
//===----------------------------------------------------------------------===//
// CodeGenSubRegIndex
//===----------------------------------------------------------------------===//
CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
: TheDef(R), EnumValue(Enum), LaneMask(0), AllSuperRegsCovered(true) {
Name = R->getName();
if (R->getValue("Namespace"))
Namespace = R->getValueAsString("Namespace");
Size = R->getValueAsInt("Size");
Offset = R->getValueAsInt("Offset");
}
CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
unsigned Enum)
: TheDef(nullptr), Name(N), Namespace(Nspace), Size(-1), Offset(-1),
EnumValue(Enum), LaneMask(0), AllSuperRegsCovered(true) {
}
std::string CodeGenSubRegIndex::getQualifiedName() const {
std::string N = getNamespace();
if (!N.empty())
N += "::";
N += getName();
return N;
}
void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
if (!TheDef)
return;
std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
if (!Comps.empty()) {
if (Comps.size() != 2)
PrintFatalError(TheDef->getLoc(),
"ComposedOf must have exactly two entries");
CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
CodeGenSubRegIndex *X = A->addComposite(B, this);
if (X)
PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
}
std::vector<Record*> Parts =
TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
if (!Parts.empty()) {
if (Parts.size() < 2)
PrintFatalError(TheDef->getLoc(),
"CoveredBySubRegs must have two or more entries");
SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
for (unsigned i = 0, e = Parts.size(); i != e; ++i)
IdxParts.push_back(RegBank.getSubRegIdx(Parts[i]));
RegBank.addConcatSubRegIndex(IdxParts, this);
}
}
unsigned CodeGenSubRegIndex::computeLaneMask() const {
// Already computed?
if (LaneMask)
return LaneMask;
// Recursion guard, shouldn't be required.
LaneMask = ~0u;
// The lane mask is simply the union of all sub-indices.
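// For illustration (hypothetical leaf lane bits): if dsub_0's composites map
// onto ssub_0 (0x1) and ssub_1 (0x2), dsub_0's lane mask becomes 0x3.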
unsigned M = 0;
for (const auto &C : Composed)
M |= C.second->computeLaneMask();
assert(M && "Missing lane mask, sub-register cycle?");
LaneMask = M;
return LaneMask;
}
//===----------------------------------------------------------------------===//
// CodeGenRegister
//===----------------------------------------------------------------------===//
CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
: TheDef(R),
EnumValue(Enum),
CostPerUse(R->getValueAsInt("CostPerUse")),
CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
HasDisjunctSubRegs(false),
SubRegsComplete(false),
SuperRegsComplete(false),
TopoSig(~0u)
{}
void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
std::vector<Record*> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
if (SRIs.size() != SRs.size())
PrintFatalError(TheDef->getLoc(),
"SubRegs and SubRegIndices must have the same size");
for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
ExplicitSubRegs.push_back(RegBank.getReg(SRs[i]));
}
// Also compute leading super-registers. Each register has a list of
// covered-by-subregs super-registers where it appears as the first explicit
// sub-register.
//
// This is used by computeSecondarySubRegs() to find candidates.
if (CoveredBySubRegs && !ExplicitSubRegs.empty())
ExplicitSubRegs.front()->LeadingSuperRegs.push_back(this);
// Add ad hoc alias links. This is a symmetric relationship between two
// registers, so build a symmetric graph by adding links in both ends.
std::vector<Record*> Aliases = TheDef->getValueAsListOfDefs("Aliases");
for (unsigned i = 0, e = Aliases.size(); i != e; ++i) {
CodeGenRegister *Reg = RegBank.getReg(Aliases[i]);
ExplicitAliases.push_back(Reg);
Reg->ExplicitAliases.push_back(this);
}
}
const std::string &CodeGenRegister::getName() const {
assert(TheDef && "no def");
return TheDef->getName();
}
namespace {
// Iterate over all register units in a set of registers.
class RegUnitIterator {
CodeGenRegister::Vec::const_iterator RegI, RegE;
CodeGenRegister::RegUnitList::iterator UnitI, UnitE;
public:
RegUnitIterator(const CodeGenRegister::Vec &Regs):
RegI(Regs.begin()), RegE(Regs.end()), UnitI(), UnitE() {
if (RegI != RegE) {
UnitI = (*RegI)->getRegUnits().begin();
UnitE = (*RegI)->getRegUnits().end();
advance();
}
}
bool isValid() const { return UnitI != UnitE; }
unsigned operator* () const { assert(isValid()); return *UnitI; }
const CodeGenRegister *getReg() const { assert(isValid()); return *RegI; }
/// Preincrement. Move to the next unit.
void operator++() {
assert(isValid() && "Cannot advance beyond the last operand");
++UnitI;
advance();
}
protected:
void advance() {
while (UnitI == UnitE) {
if (++RegI == RegE)
break;
UnitI = (*RegI)->getRegUnits().begin();
UnitE = (*RegI)->getRegUnits().end();
}
}
};
} // namespace
// Return true if this unit appears in RegUnits.
static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
return RegUnits.test(Unit);
}
// Inherit register units from subregisters.
// Return true if the RegUnits changed.
bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
bool changed = false;
for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
I != E; ++I) {
CodeGenRegister *SR = I->second;
// Merge the subregister's units into this register's RegUnits.
changed |= (RegUnits |= SR->RegUnits);
}
return changed;
}
const CodeGenRegister::SubRegMap &
CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
// Only compute this map once.
if (SubRegsComplete)
return SubRegs;
SubRegsComplete = true;
HasDisjunctSubRegs = ExplicitSubRegs.size() > 1;
// First insert the explicit subregs and make sure they are fully indexed.
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
" appears twice in Register " + getName());
// Map explicit sub-registers first, so the names take precedence.
// The inherited sub-registers are mapped below.
SubReg2Idx.insert(std::make_pair(SR, Idx));
}
// Keep track of inherited subregs and how they can be reached.
SmallPtrSet<CodeGenRegister*, 8> Orphans;
// Clone inherited subregs and place duplicate entries in Orphans.
// Here the order is important - earlier subregs take precedence.
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
const SubRegMap &Map = SR->computeSubRegs(RegBank);
HasDisjunctSubRegs |= SR->HasDisjunctSubRegs;
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI) {
if (!SubRegs.insert(*SI).second)
Orphans.insert(SI->second);
}
}
// Expand any composed subreg indices.
// If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
// qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
// expanded subreg indices recursively.
SmallVector<CodeGenSubRegIndex*, 8> Indices = ExplicitSubRegIndices;
for (unsigned i = 0; i != Indices.size(); ++i) {
CodeGenSubRegIndex *Idx = Indices[i];
const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
CodeGenRegister *SR = SubRegs[Idx];
const SubRegMap &Map = SR->computeSubRegs(RegBank);
// Look at the possible compositions of Idx.
// They may not all be supported by SR.
for (CodeGenSubRegIndex::CompMap::const_iterator I = Comps.begin(),
E = Comps.end(); I != E; ++I) {
SubRegMap::const_iterator SRI = Map.find(I->first);
if (SRI == Map.end())
continue; // Idx + I->first doesn't exist in SR.
// Add I->second as a name for the subreg SRI->second, assuming it is
// orphaned, and the name isn't already used for something else.
if (SubRegs.count(I->second) || !Orphans.erase(SRI->second))
continue;
// We found a new name for the orphaned sub-register.
SubRegs.insert(std::make_pair(I->second, SRI->second));
Indices.push_back(I->second);
}
}
// Now Orphans contains the inherited subregisters without a direct index.
// Create inferred indexes for all missing entries.
// Work backwards in the Indices vector in order to compose subregs bottom-up.
// Consider this subreg sequence:
//
// qsub_1 -> dsub_0 -> ssub_0
//
// The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
// can be reached in two different ways:
//
// qsub_1 -> ssub_0
// dsub_2 -> ssub_0
//
// We pick the latter composition because another register may have [dsub_0,
// dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg. The
// dsub_2 -> ssub_0 composition can be shared.
while (!Indices.empty() && !Orphans.empty()) {
CodeGenSubRegIndex *Idx = Indices.pop_back_val();
CodeGenRegister *SR = SubRegs[Idx];
const SubRegMap &Map = SR->computeSubRegs(RegBank);
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI)
if (Orphans.erase(SI->second))
SubRegs[RegBank.getCompositeSubRegIndex(Idx, SI->first)] = SI->second;
}
// Compute the inverse SubReg -> Idx map.
for (SubRegMap::const_iterator SI = SubRegs.begin(), SE = SubRegs.end();
SI != SE; ++SI) {
if (SI->second == this) {
ArrayRef<SMLoc> Loc;
if (TheDef)
Loc = TheDef->getLoc();
PrintFatalError(Loc, "Register " + getName() +
" has itself as a sub-register");
}
// Compute AllSuperRegsCovered.
if (!CoveredBySubRegs)
SI->first->AllSuperRegsCovered = false;
// Ensure that every sub-register has a unique name.
DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
SubReg2Idx.insert(std::make_pair(SI->second, SI->first)).first;
if (Ins->second == SI->first)
continue;
// Trouble: Two different names for SI->second.
ArrayRef<SMLoc> Loc;
if (TheDef)
Loc = TheDef->getLoc();
PrintFatalError(Loc, "Sub-register can't have two names: " +
SI->second->getName() + " available as " +
SI->first->getName() + " and " + Ins->second->getName());
}
// Derive possible names for sub-register concatenations from any explicit
// sub-registers. By doing this before computeSecondarySubRegs(), we ensure
// that getConcatSubRegIndex() won't invent any concatenated indices that the
// user already specified.
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
if (!SR->CoveredBySubRegs || SR->ExplicitSubRegs.size() <= 1)
continue;
// SR is composed of multiple sub-regs. Find their names in this register.
SmallVector<CodeGenSubRegIndex*, 8> Parts;
for (unsigned j = 0, e = SR->ExplicitSubRegs.size(); j != e; ++j)
Parts.push_back(getSubRegIndex(SR->ExplicitSubRegs[j]));
// Offer this as an existing spelling for the concatenation of Parts.
RegBank.addConcatSubRegIndex(Parts, ExplicitSubRegIndices[i]);
}
// Initialize RegUnitList. Because getSubRegs is called recursively, this
// processes the register hierarchy in postorder.
//
// Inherit all sub-register units. It is good enough to look at the explicit
// sub-registers, the other registers won't contribute any more units.
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
RegUnits |= SR->RegUnits;
}
// Absent any ad hoc aliasing, we create one register unit per leaf register.
// These units correspond to the maximal cliques in the register overlap
// graph, which is optimal.
//
// When there is ad hoc aliasing, we simply create one unit per edge in the
// undirected ad hoc aliasing graph. Technically, we could do better by
// identifying maximal cliques in the ad hoc graph, but cliques larger than 2
// are extremely rare anyway (I've never seen one), so we don't bother with
// the added complexity.
for (unsigned i = 0, e = ExplicitAliases.size(); i != e; ++i) {
CodeGenRegister *AR = ExplicitAliases[i];
// Only visit each edge once.
if (AR->SubRegsComplete)
continue;
// Create a RegUnit representing this alias edge, and add it to both
// registers.
unsigned Unit = RegBank.newRegUnit(this, AR);
RegUnits.set(Unit);
AR->RegUnits.set(Unit);
}
// Finally, create units for leaf registers without ad hoc aliases. Note that
// a leaf register with ad hoc aliases doesn't get its own unit - it isn't
// necessary. This means the aliasing leaf registers can share a single unit.
if (RegUnits.empty())
RegUnits.set(RegBank.newRegUnit(this));
// We have now computed the native register units. More may be adopted later
// for balancing purposes.
NativeRegUnits = RegUnits;
return SubRegs;
}
// In a register that is covered by its sub-registers, try to find redundant
// sub-registers. For example:
//
// QQ0 = {Q0, Q1}
// Q0 = {D0, D1}
// Q1 = {D2, D3}
//
// We can infer that D1_D2 is also a sub-register, even if it wasn't named in
// the register definition.
//
// The explicitly specified registers form a tree. This function discovers
// sub-register relationships that would force a DAG.
//
void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
// Collect new sub-registers first, add them later.
SmallVector<SubRegMap::value_type, 8> NewSubRegs;
// Look at the leading super-registers of each sub-register. Those are the
// candidates for new sub-registers, assuming they are fully contained in
// this register.
for (SubRegMap::iterator I = SubRegs.begin(), E = SubRegs.end(); I != E; ++I){
const CodeGenRegister *SubReg = I->second;
const CodeGenRegister::SuperRegList &Leads = SubReg->LeadingSuperRegs;
for (unsigned i = 0, e = Leads.size(); i != e; ++i) {
CodeGenRegister *Cand = const_cast<CodeGenRegister*>(Leads[i]);
// Already got this sub-register?
if (Cand == this || getSubRegIndex(Cand))
continue;
// Check if each component of Cand is already a sub-register.
// We know that the first component is I->second, and is present with the
// name I->first.
SmallVector<CodeGenSubRegIndex*, 8> Parts(1, I->first);
assert(!Cand->ExplicitSubRegs.empty() &&
"Super-register has no sub-registers");
for (unsigned j = 1, e = Cand->ExplicitSubRegs.size(); j != e; ++j) {
if (CodeGenSubRegIndex *Idx = getSubRegIndex(Cand->ExplicitSubRegs[j]))
Parts.push_back(Idx);
else {
// Sub-register doesn't exist.
Parts.clear();
break;
}
}
// If some Cand sub-register is not part of this register, or if Cand only
// has one sub-register, there is nothing to do.
if (Parts.size() <= 1)
continue;
// Each part of Cand is a sub-register of this. Make the full Cand also
// a sub-register with a concatenated sub-register index.
CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
NewSubRegs.push_back(std::make_pair(Concat, Cand));
}
}
// Now add all the new sub-registers.
for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
// Don't add Cand if another sub-register is already using the index.
if (!SubRegs.insert(NewSubRegs[i]).second)
continue;
CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
CodeGenRegister *NewSubReg = NewSubRegs[i].second;
SubReg2Idx.insert(std::make_pair(NewSubReg, NewIdx));
}
// Create sub-register index composition maps for the synthesized indices.
for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
CodeGenRegister *NewSubReg = NewSubRegs[i].second;
for (SubRegMap::const_iterator SI = NewSubReg->SubRegs.begin(),
SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) {
CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second);
if (!SubIdx)
PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
SI->second->getName() + " in " + getName());
NewIdx->addComposite(SI->first, SubIdx);
}
}
}
void CodeGenRegister::computeSuperRegs(CodeGenRegBank &RegBank) {
// Only visit each register once.
if (SuperRegsComplete)
return;
SuperRegsComplete = true;
// Make sure all sub-registers have been visited first, so the super-reg
// lists will be topologically ordered.
for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
I != E; ++I)
I->second->computeSuperRegs(RegBank);
// Now add this as a super-register on all sub-registers.
// Also compute the TopoSigId in post-order.
TopoSigId Id;
for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
I != E; ++I) {
// Topological signature computed from SubIdx, TopoId(SubReg).
// Loops and idempotent indices have TopoSig = ~0u.
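// Registers with identical sub-register structure end up sharing a TopoSig,
// letting later passes such as computeComposites() visit each structure once.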
Id.push_back(I->first->EnumValue);
Id.push_back(I->second->TopoSig);
// Don't add duplicate entries.
if (!I->second->SuperRegs.empty() && I->second->SuperRegs.back() == this)
continue;
I->second->SuperRegs.push_back(this);
}
TopoSig = RegBank.getTopoSig(Id);
}
void
CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
CodeGenRegBank &RegBank) const {
assert(SubRegsComplete && "Must precompute sub-registers");
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
if (OSet.insert(SR))
SR->addSubRegsPreOrder(OSet, RegBank);
}
// Add any secondary sub-registers that weren't part of the explicit tree.
for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
I != E; ++I)
OSet.insert(I->second);
}
// Get the sum of this register's unit weights.
unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
unsigned Weight = 0;
for (RegUnitList::iterator I = RegUnits.begin(), E = RegUnits.end();
I != E; ++I) {
Weight += RegBank.getRegUnit(*I).Weight;
}
return Weight;
}
//===----------------------------------------------------------------------===//
// RegisterTuples
//===----------------------------------------------------------------------===//
// A RegisterTuples def is used to generate pseudo-registers from lists of
// sub-registers. We provide a SetTheory expander class that returns the new
// registers.
namespace {
struct TupleExpander : SetTheory::Expander {
void expand(SetTheory &ST, Record *Def, SetTheory::RecSet &Elts) override {
std::vector<Record*> Indices = Def->getValueAsListOfDefs("SubRegIndices");
unsigned Dim = Indices.size();
ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
if (Dim != SubRegs->size())
PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
if (Dim < 2)
PrintFatalError(Def->getLoc(),
"Tuples must have at least 2 sub-registers");
// Evaluate the sub-register lists to be zipped.
unsigned Length = ~0u;
SmallVector<SetTheory::RecSet, 4> Lists(Dim);
for (unsigned i = 0; i != Dim; ++i) {
ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
Length = std::min(Length, unsigned(Lists[i].size()));
}
if (Length == 0)
return;
// Precompute some types.
Record *RegisterCl = Def->getRecords().getClass("Register");
RecTy *RegisterRecTy = RecordRecTy::get(RegisterCl);
StringInit *BlankName = StringInit::get("");
// Zip them up.
for (unsigned n = 0; n != Length; ++n) {
std::string Name;
Record *Proto = Lists[0][n];
std::vector<Init*> Tuple;
unsigned CostPerUse = 0;
for (unsigned i = 0; i != Dim; ++i) {
Record *Reg = Lists[i][n];
if (i) Name += '_';
Name += Reg->getName();
Tuple.push_back(DefInit::get(Reg));
CostPerUse = std::max(CostPerUse,
unsigned(Reg->getValueAsInt("CostPerUse")));
}
// Create a new Record representing the synthesized register. This record
// is only for consumption by CodeGenRegister, it is not added to the
// RecordKeeper.
Record *NewReg = new Record(Name, Def->getLoc(), Def->getRecords());
Elts.insert(NewReg);
// Copy Proto super-classes.
ArrayRef<Record *> Supers = Proto->getSuperClasses();
ArrayRef<SMRange> Ranges = Proto->getSuperClassRanges();
for (unsigned i = 0, e = Supers.size(); i != e; ++i)
NewReg->addSuperClass(Supers[i], Ranges[i]);
// Copy Proto fields.
for (unsigned i = 0, e = Proto->getValues().size(); i != e; ++i) {
RecordVal RV = Proto->getValues()[i];
// Skip existing fields, like NAME.
if (NewReg->getValue(RV.getNameInit()))
continue;
StringRef Field = RV.getName();
// Replace the sub-register list with Tuple.
if (Field == "SubRegs")
RV.setValue(ListInit::get(Tuple, RegisterRecTy));
// Provide a blank AsmName. MC hacks are required anyway.
if (Field == "AsmName")
RV.setValue(BlankName);
// CostPerUse is aggregated from all Tuple members.
if (Field == "CostPerUse")
RV.setValue(IntInit::get(CostPerUse));
// Composite registers are always covered by sub-registers.
if (Field == "CoveredBySubRegs")
RV.setValue(BitInit::get(true));
// Copy fields from the RegisterTuples def.
if (Field == "SubRegIndices" ||
Field == "CompositeIndices") {
NewReg->addValue(*Def->getValue(Field));
continue;
}
// Some fields get their default uninitialized value.
if (Field == "DwarfNumbers" ||
Field == "DwarfAlias" ||
Field == "Aliases") {
if (const RecordVal *DefRV = RegisterCl->getValue(Field))
NewReg->addValue(*DefRV);
continue;
}
// Everything else is copied from Proto.
NewReg->addValue(RV);
}
}
}
};
}
//===----------------------------------------------------------------------===//
// CodeGenRegisterClass
//===----------------------------------------------------------------------===//
static void sortAndUniqueRegisters(CodeGenRegister::Vec &M) {
std::sort(M.begin(), M.end(), deref<llvm::less>());
M.erase(std::unique(M.begin(), M.end(), deref<llvm::equal>()), M.end());
}
CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
: TheDef(R),
Name(R->getName()),
TopoSigs(RegBank.getNumTopoSigs()),
EnumValue(-1),
LaneMask(0) {
// Rename anonymous register classes.
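// (The index-9 check below presumably matches anonymous def names of the
// form "anonymous.<N>": nine characters followed by a dot.)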
if (R->getName().size() > 9 && R->getName()[9] == '.') {
static unsigned AnonCounter = 0;
R->setName("AnonRegClass_" + utostr(AnonCounter++));
}
std::vector<Record*> TypeList = R->getValueAsListOfDefs("RegTypes");
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
Record *Type = TypeList[i];
if (!Type->isSubClassOf("ValueType"))
PrintFatalError("RegTypes list member '" + Type->getName() +
"' does not derive from the ValueType class!");
VTs.push_back(getValueType(Type));
}
assert(!VTs.empty() && "RegisterClass must contain at least one ValueType!");
// Allocation order 0 is the full set. AltOrders provides others.
const SetTheory::RecVec *Elements = RegBank.getSets().expand(R);
ListInit *AltOrders = R->getValueAsListInit("AltOrders");
Orders.resize(1 + AltOrders->size());
// Default allocation order always contains all registers.
for (unsigned i = 0, e = Elements->size(); i != e; ++i) {
Orders[0].push_back((*Elements)[i]);
const CodeGenRegister *Reg = RegBank.getReg((*Elements)[i]);
Members.push_back(Reg);
TopoSigs.set(Reg->getTopoSig());
}
sortAndUniqueRegisters(Members);
// Alternative allocation orders may be subsets.
SetTheory::RecSet Order;
for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
Orders[1 + i].append(Order.begin(), Order.end());
// Verify that all altorder members are regclass members.
while (!Order.empty()) {
CodeGenRegister *Reg = RegBank.getReg(Order.back());
Order.pop_back();
if (!contains(Reg))
PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
" is not a class member");
}
}
// Allow targets to override the size in bits of the RegisterClass.
unsigned Size = R->getValueAsInt("Size");
Namespace = R->getValueAsString("Namespace");
SpillSize = Size ? Size : MVT(VTs[0]).getSizeInBits();
SpillAlignment = R->getValueAsInt("Alignment");
CopyCost = R->getValueAsInt("CopyCost");
Allocatable = R->getValueAsBit("isAllocatable");
AltOrderSelect = R->getValueAsString("AltOrderSelect");
int AllocationPriority = R->getValueAsInt("AllocationPriority");
if (AllocationPriority < 0 || AllocationPriority > 63)
PrintFatalError(R->getLoc(), "AllocationPriority out of range [0,63]");
this->AllocationPriority = AllocationPriority;
}
// Create an inferred register class that was missing from the .td files.
// Most properties will be inherited from the closest super-class after the
// class structure has been computed.
CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
StringRef Name, Key Props)
: Members(*Props.Members),
TheDef(nullptr),
Name(Name),
TopoSigs(RegBank.getNumTopoSigs()),
EnumValue(-1),
SpillSize(Props.SpillSize),
SpillAlignment(Props.SpillAlignment),
CopyCost(0),
Allocatable(true),
AllocationPriority(0) {
for (const auto R : Members)
TopoSigs.set(R->getTopoSig());
}
// Compute inherited properties for a synthesized register class.
void CodeGenRegisterClass::inheritProperties(CodeGenRegBank &RegBank) {
assert(!getDef() && "Only synthesized classes can inherit properties");
assert(!SuperClasses.empty() && "Synthesized class without super class");
// The last super-class is the smallest one.
CodeGenRegisterClass &Super = *SuperClasses.back();
// Most properties are copied directly.
// Exceptions are members, size, and alignment
Namespace = Super.Namespace;
VTs = Super.VTs;
CopyCost = Super.CopyCost;
Allocatable = Super.Allocatable;
AltOrderSelect = Super.AltOrderSelect;
AllocationPriority = Super.AllocationPriority;
// Copy all allocation orders, filter out foreign registers from the larger
// super-class.
Orders.resize(Super.Orders.size());
for (unsigned i = 0, ie = Super.Orders.size(); i != ie; ++i)
for (unsigned j = 0, je = Super.Orders[i].size(); j != je; ++j)
if (contains(RegBank.getReg(Super.Orders[i][j])))
Orders[i].push_back(Super.Orders[i][j]);
}
bool CodeGenRegisterClass::contains(const CodeGenRegister *Reg) const {
return std::binary_search(Members.begin(), Members.end(), Reg,
deref<llvm::less>());
}
namespace llvm {
raw_ostream &operator<<(raw_ostream &OS, const CodeGenRegisterClass::Key &K) {
OS << "{ S=" << K.SpillSize << ", A=" << K.SpillAlignment;
for (const auto R : *K.Members)
OS << ", " << R->getName();
return OS << " }";
}
}
// This is a simple lexicographical order that can be used to search for sets.
// It is not the same as the topological order provided by TopoOrderRC.
bool CodeGenRegisterClass::Key::
operator<(const CodeGenRegisterClass::Key &B) const {
assert(Members && B.Members);
return std::tie(*Members, SpillSize, SpillAlignment) <
std::tie(*B.Members, B.SpillSize, B.SpillAlignment);
}
// Returns true if RC is a strict subclass.
// RC is a sub-class of this class if it is a valid replacement for any
// instruction operand where a register of this class is required. It must
// satisfy these conditions:
//
// 1. All RC registers are also in this.
// 2. The RC spill size must not be smaller than our spill size.
// 3. RC spill alignment must be compatible with ours.
//
static bool testSubClass(const CodeGenRegisterClass *A,
const CodeGenRegisterClass *B) {
return A->SpillAlignment && B->SpillAlignment % A->SpillAlignment == 0 &&
A->SpillSize <= B->SpillSize &&
std::includes(A->getMembers().begin(), A->getMembers().end(),
B->getMembers().begin(), B->getMembers().end(),
deref<llvm::less>());
}
/// Sorting predicate for register classes. This provides a topological
/// ordering that arranges all register classes before their sub-classes.
///
/// Register classes with the same registers, spill size, and alignment form a
/// clique. They will be ordered alphabetically.
///
static bool __cdecl TopoOrderRC(const CodeGenRegisterClass &PA, // HLSL Change - __cdecl
const CodeGenRegisterClass &PB) {
auto *A = &PA;
auto *B = &PB;
if (A == B)
return false;
// Order by ascending spill size.
if (A->SpillSize < B->SpillSize)
return true;
if (A->SpillSize > B->SpillSize)
return false;
// Order by ascending spill alignment.
if (A->SpillAlignment < B->SpillAlignment)
return true;
if (A->SpillAlignment > B->SpillAlignment)
return false;
// Order by descending set size. Note that the classes' allocation order may
// not have been computed yet. The Members set is always valid.
if (A->getMembers().size() > B->getMembers().size())
return true;
if (A->getMembers().size() < B->getMembers().size())
return false;
// Finally order by name as a tie breaker.
return StringRef(A->getName()) < B->getName();
}
std::string CodeGenRegisterClass::getQualifiedName() const {
if (Namespace.empty())
return getName();
else
return Namespace + "::" + getName();
}
// Compute sub-classes of all register classes.
// Assume the classes are ordered topologically.
void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
auto &RegClasses = RegBank.getRegClasses();
// Visit backwards so sub-classes are seen first.
for (auto I = RegClasses.rbegin(), E = RegClasses.rend(); I != E; ++I) {
CodeGenRegisterClass &RC = *I;
RC.SubClasses.resize(RegClasses.size());
RC.SubClasses.set(RC.EnumValue);
// Normally, all sub-classes have a higher EnumValue than RC, unless RC is
// part of a clique.
for (auto I2 = I.base(), E2 = RegClasses.end(); I2 != E2; ++I2) {
CodeGenRegisterClass &SubRC = *I2;
if (RC.SubClasses.test(SubRC.EnumValue))
continue;
if (!testSubClass(&RC, &SubRC))
continue;
// SubRC is a sub-class. Grab all its sub-classes so we won't have to
// check them again.
RC.SubClasses |= SubRC.SubClasses;
}
// Sweep up missed clique members. They will be immediately preceding RC.
for (auto I2 = std::next(I); I2 != E && testSubClass(&RC, &*I2); ++I2)
RC.SubClasses.set(I2->EnumValue);
}
// Compute the SuperClasses lists from the SubClasses vectors.
for (auto &RC : RegClasses) {
const BitVector &SC = RC.getSubClasses();
auto I = RegClasses.begin();
for (int s = 0, next_s = SC.find_first(); next_s != -1;
next_s = SC.find_next(s)) {
std::advance(I, next_s - s);
s = next_s;
if (&*I == &RC)
continue;
I->SuperClasses.push_back(&RC);
}
}
// With the class hierarchy in place, let synthesized register classes inherit
// properties from their closest super-class. The iteration order here can
// propagate properties down multiple levels.
for (auto &RC : RegClasses)
if (!RC.getDef())
RC.inheritProperties(RegBank);
}
void CodeGenRegisterClass::getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
BitVector &Out) const {
auto FindI = SuperRegClasses.find(SubIdx);
if (FindI == SuperRegClasses.end())
return;
for (CodeGenRegisterClass *RC : FindI->second)
Out.set(RC->EnumValue);
}
// Populate a unique sorted list of units from a register set.
void CodeGenRegisterClass::buildRegUnitSet(
std::vector<unsigned> &RegUnits) const {
std::vector<unsigned> TmpUnits;
for (RegUnitIterator UnitI(Members); UnitI.isValid(); ++UnitI)
TmpUnits.push_back(*UnitI);
std::sort(TmpUnits.begin(), TmpUnits.end());
std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
std::back_inserter(RegUnits));
}
//===----------------------------------------------------------------------===//
// CodeGenRegBank
//===----------------------------------------------------------------------===//
CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) {
// Configure register Sets to understand register classes and tuples.
Sets.addFieldExpander("RegisterClass", "MemberList");
Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
Sets.addExpander("RegisterTuples", llvm::make_unique<TupleExpander>());
// Read in the user-defined (named) sub-register indices.
// More indices will be synthesized later.
std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
std::sort(SRIs.begin(), SRIs.end(), LessRecord());
for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
getSubRegIdx(SRIs[i]);
// Build composite maps from ComposedOf fields.
for (auto &Idx : SubRegIndices)
Idx.updateComponents(*this);
// Read in the register definitions.
std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
std::sort(Regs.begin(), Regs.end(), LessRecordRegister());
// Assign the enumeration values.
for (unsigned i = 0, e = Regs.size(); i != e; ++i)
getReg(Regs[i]);
// Expand tuples and number the new registers.
std::vector<Record*> Tups =
Records.getAllDerivedDefinitions("RegisterTuples");
for (Record *R : Tups) {
std::vector<Record *> TupRegs = *Sets.expand(R);
std::sort(TupRegs.begin(), TupRegs.end(), LessRecordRegister());
for (Record *RC : TupRegs)
getReg(RC);
}
// Now all the registers are known. Build the object graph of explicit
// register-register references.
for (auto &Reg : Registers)
Reg.buildObjectGraph(*this);
// Compute register name map.
for (auto &Reg : Registers)
// FIXME: This could just be RegistersByName[name] = register, except that
// causes some failures in MIPS - perhaps they have duplicate register name
// entries? (or maybe there's a reason for it - I don't know much about this
// code, just drive-by refactoring)
RegistersByName.insert(
std::make_pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));
// Precompute all sub-register maps.
// This will create Composite entries for all inferred sub-register indices.
for (auto &Reg : Registers)
Reg.computeSubRegs(*this);
// Infer even more sub-registers by combining leading super-registers.
for (auto &Reg : Registers)
if (Reg.CoveredBySubRegs)
Reg.computeSecondarySubRegs(*this);
// After the sub-register graph is complete, compute the topologically
// ordered SuperRegs list.
for (auto &Reg : Registers)
Reg.computeSuperRegs(*this);
// Native register units are associated with a leaf register. They've all been
// discovered now.
NumNativeRegUnits = RegUnits.size();
// Read in register class definitions.
std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
if (RCs.empty())
PrintFatalError("No 'RegisterClass' subclasses defined!");
// Allocate user-defined register classes.
for (auto *RC : RCs) {
RegClasses.emplace_back(*this, RC);
addToMaps(&RegClasses.back());
}
// Infer missing classes to create a full algebra.
computeInferredRegisterClasses();
// Order register classes topologically and assign enum values.
RegClasses.sort(TopoOrderRC);
unsigned i = 0;
for (auto &RC : RegClasses)
RC.EnumValue = i++;
CodeGenRegisterClass::computeSubClasses(*this);
}
// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
CodeGenSubRegIndex*
CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
SubRegIndices.emplace_back(Name, Namespace, SubRegIndices.size() + 1);
return &SubRegIndices.back();
}
CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
if (Idx)
return Idx;
SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1);
Idx = &SubRegIndices.back();
return Idx;
}
CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
CodeGenRegister *&Reg = Def2Reg[Def];
if (Reg)
return Reg;
Registers.emplace_back(Def, Registers.size() + 1);
Reg = &Registers.back();
return Reg;
}
void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
if (Record *Def = RC->getDef())
Def2RC.insert(std::make_pair(Def, RC));
// Duplicate classes are rejected by insert().
// That's OK, we only care about the properties handled by CGRC::Key.
CodeGenRegisterClass::Key K(*RC);
Key2RC.insert(std::make_pair(K, RC));
}
// Create a synthetic sub-class if it is missing.
CodeGenRegisterClass*
CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
const CodeGenRegister::Vec *Members,
StringRef Name) {
// Synthetic sub-class has the same size and alignment as RC.
CodeGenRegisterClass::Key K(Members, RC->SpillSize, RC->SpillAlignment);
RCKeyMap::const_iterator FoundI = Key2RC.find(K);
if (FoundI != Key2RC.end())
return FoundI->second;
// Sub-class doesn't exist, create a new one.
RegClasses.emplace_back(*this, Name, K);
addToMaps(&RegClasses.back());
return &RegClasses.back();
}
CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) {
if (CodeGenRegisterClass *RC = Def2RC[Def])
return RC;
PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
}
CodeGenSubRegIndex*
CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B) {
// Look for an existing entry.
CodeGenSubRegIndex *Comp = A->compose(B);
if (Comp)
return Comp;
// None exists, synthesize one.
std::string Name = A->getName() + "_then_" + B->getName();
Comp = createSubRegIndex(Name, A->getNamespace());
A->addComposite(B, Comp);
return Comp;
}
CodeGenSubRegIndex *CodeGenRegBank::
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
assert(Parts.size() > 1 && "Need two parts to concatenate");
// Look for an existing entry.
CodeGenSubRegIndex *&Idx = ConcatIdx[Parts];
if (Idx)
return Idx;
// None exists, synthesize one.
std::string Name = Parts.front()->getName();
// Determine whether all parts are contiguous.
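// Illustrative example (hypothetical sizes/offsets): concatenating dsub_0
// {Size=64, Offset=0} with dsub_1 {Size=64, Offset=64} yields a contiguous
// "dsub_0_dsub_1" index with Size=128 and Offset=0; any gap between parts
// leaves the synthesized index with Offset=-1.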
bool isContinuous = true;
unsigned Size = Parts.front()->Size;
unsigned LastOffset = Parts.front()->Offset;
unsigned LastSize = Parts.front()->Size;
for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
Name += '_';
Name += Parts[i]->getName();
Size += Parts[i]->Size;
if (Parts[i]->Offset != (LastOffset + LastSize))
isContinuous = false;
LastOffset = Parts[i]->Offset;
LastSize = Parts[i]->Size;
}
Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
Idx->Size = Size;
Idx->Offset = isContinuous ? Parts.front()->Offset : -1;
return Idx;
}
void CodeGenRegBank::computeComposites() {
// Keep track of TopoSigs visited. We only need to visit each TopoSig once,
// and many registers will share TopoSigs on regular architectures.
BitVector TopoSigs(getNumTopoSigs());
for (const auto &Reg1 : Registers) {
// Skip identical subreg structures already processed.
if (TopoSigs.test(Reg1.getTopoSig()))
continue;
TopoSigs.set(Reg1.getTopoSig());
const CodeGenRegister::SubRegMap &SRM1 = Reg1.getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator i1 = SRM1.begin(),
e1 = SRM1.end(); i1 != e1; ++i1) {
CodeGenSubRegIndex *Idx1 = i1->first;
CodeGenRegister *Reg2 = i1->second;
// Ignore identity compositions.
if (&Reg1 == Reg2)
continue;
const CodeGenRegister::SubRegMap &SRM2 = Reg2->getSubRegs();
// Try composing Idx1 with another SubRegIndex.
for (CodeGenRegister::SubRegMap::const_iterator i2 = SRM2.begin(),
e2 = SRM2.end(); i2 != e2; ++i2) {
CodeGenSubRegIndex *Idx2 = i2->first;
CodeGenRegister *Reg3 = i2->second;
// Ignore identity compositions.
if (Reg2 == Reg3)
continue;
// OK Reg1:IdxPair == Reg3. Find the index with Reg:Idx == Reg3.
CodeGenSubRegIndex *Idx3 = Reg1.getSubRegIndex(Reg3);
assert(Idx3 && "Sub-register doesn't have an index");
// Conflicting composition? Emit a warning but allow it.
if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3))
PrintWarning(Twine("SubRegIndex ") + Idx1->getQualifiedName() +
" and " + Idx2->getQualifiedName() +
" compose ambiguously as " + Prev->getQualifiedName() +
" or " + Idx3->getQualifiedName());
}
}
}
}
// Compute lane masks. This is similar to register units, but at the
// sub-register index level. Each bit in the lane mask is like a register unit
// class, and two lane masks will have a bit in common if two sub-register
// indices overlap in some register.
//
// Conservatively share a lane mask bit if two sub-register indices overlap in
// some registers, but not in others. That shouldn't happen a lot.
void CodeGenRegBank::computeSubRegLaneMasks() {
// First assign individual bits to all the leaf indices.
unsigned Bit = 0;
// Determine mask of lanes that cover their registers.
CoveringLanes = ~0u;
for (auto &Idx : SubRegIndices) {
if (Idx.getComposites().empty()) {
Idx.LaneMask = 1u << Bit;
// Share bit 31 in the unlikely case there are more than 32 leafs.
//
// Sharing bits is harmless; it allows graceful degradation in targets
// with more than 32 vector lanes. They simply get a limited resolution
// view of lanes beyond the 32nd.
//
// See also the comment for getSubRegIndexLaneMask().
if (Bit < 31)
++Bit;
else
// Once bit 31 is shared among multiple leafs, the 'lane' it represents
// is no longer covering its registers.
CoveringLanes &= ~(1u << Bit);
} else {
Idx.LaneMask = 0;
}
}
// Compute transformation sequences for composeSubRegIndexLaneMask. The idea
// here is that for each possible target subregister we look at the leafs
// in the subregister graph that compose for this target and create
// transformation sequences for the lanemasks. Each step in the sequence
// consists of a bitmask and a bitrotate operation. As the rotation amounts
// are usually the same for many subregisters we can easily combine the steps
// by combining the masks.
for (const auto &Idx : SubRegIndices) {
const auto &Composites = Idx.getComposites();
auto &LaneTransforms = Idx.CompositionLaneMaskTransform;
// Go through all leaf subregisters and find the ones that compose with Idx.
// These make up all possible valid bits in the lane mask we want to
// transform. Looking only at the leaf indices ensures that only a single
// bit in the mask is set.
unsigned NextBit = 0;
for (auto &Idx2 : SubRegIndices) {
// Skip non-leaf subregisters.
if (!Idx2.getComposites().empty())
continue;
// Replicate the behaviour from the lane mask generation loop above.
unsigned SrcBit = NextBit;
unsigned SrcMask = 1u << SrcBit;
if (NextBit < 31)
++NextBit;
assert(Idx2.LaneMask == SrcMask);
// Get the composed subregister if there is any.
auto C = Composites.find(&Idx2);
if (C == Composites.end())
continue;
const CodeGenSubRegIndex *Composite = C->second;
// The Composed subreg should be a leaf subreg too
assert(Composite->getComposites().empty());
// Create Mask+Rotate operation and merge with existing ops if possible.
unsigned DstBit = Log2_32(Composite->LaneMask);
int Shift = DstBit - SrcBit;
uint8_t RotateLeft = Shift >= 0 ? (uint8_t)Shift : 32+Shift;
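// For example (hypothetical bit positions): SrcBit=1, DstBit=4 gives Shift=3
// and RotateLeft=3; SrcBit=5, DstBit=2 gives Shift=-3 and RotateLeft=29,
// i.e. a left rotate by 29 in the 32-bit lane mask.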
for (auto &I : LaneTransforms) {
if (I.RotateLeft == RotateLeft) {
I.Mask |= SrcMask;
SrcMask = 0;
}
}
if (SrcMask != 0) {
MaskRolPair MaskRol = { SrcMask, RotateLeft };
LaneTransforms.push_back(MaskRol);
}
}
// Optimize if the transformation consists of one step only: Set mask to
// 0xffffffff (including some irrelevant invalid bits) so that it should
// merge with more entries later while compressing the table.
if (LaneTransforms.size() == 1)
LaneTransforms[0].Mask = ~0u;
// Further compression optimization: For invalid compositions resulting
// in a sequence with 0 entries we can just pick any other. Choose
// Mask 0xffffffff with Rotation 0.
if (LaneTransforms.size() == 0) {
MaskRolPair P = { ~0u, 0 };
LaneTransforms.push_back(P);
}
}
// FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
// by the sub-register graph? This doesn't occur in any known targets.
// Inherit lanes from composites.
for (const auto &Idx : SubRegIndices) {
unsigned Mask = Idx.computeLaneMask();
// If some super-registers without CoveredBySubRegs use this index, we can
// no longer assume that the lanes are covering their registers.
if (!Idx.AllSuperRegsCovered)
CoveringLanes &= ~Mask;
}
// Compute lane mask combinations for register classes.
for (auto &RegClass : RegClasses) {
unsigned LaneMask = 0;
for (const auto &SubRegIndex : SubRegIndices) {
if (RegClass.getSubClassWithSubReg(&SubRegIndex) == nullptr)
continue;
LaneMask |= SubRegIndex.LaneMask;
}
RegClass.LaneMask = LaneMask;
}
}
namespace {
// UberRegSet is a helper class for computeRegUnitWeights. Each UberRegSet is
// the transitive closure of the union of overlapping register
// classes. Together, the UberRegSets form a partition of the registers. If we
// consider overlapping register classes to be connected, then each UberRegSet
// is a set of connected components.
//
// An UberRegSet will likely be a horizontal slice of register names of
// the same width. Nontrivial subregisters should then be in a separate
// UberRegSet. But this property isn't required for valid computation of
// register unit weights.
//
// A Weight field caches the max per-register unit weight in each UberRegSet.
//
// A set of SingularDeterminants flags single units of some register in this set
// for which the unit weight equals the set weight. These units should not have
// their weight increased.
struct UberRegSet {
CodeGenRegister::Vec Regs;
unsigned Weight;
CodeGenRegister::RegUnitList SingularDeterminants;
UberRegSet(): Weight(0) {}
};
} // namespace
// Partition registers into UberRegSets, where each set is the transitive
// closure of the union of overlapping register classes.
//
// UberRegSets[0] is a special non-allocatable set.
static void computeUberSets(std::vector<UberRegSet> &UberSets,
std::vector<UberRegSet*> &RegSets,
CodeGenRegBank &RegBank) {
const auto &Registers = RegBank.getRegisters();
// The Register EnumValue is one greater than its index into Registers.
assert(Registers.size() == Registers.back().EnumValue &&
"register enum value mismatch");
// For simplicity, make the SetID the same as EnumValue.
IntEqClasses UberSetIDs(Registers.size()+1);
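// UberSetIDs is a union-find structure: join() merges the equivalence classes
// of two register numbers, findLeader() returns a class representative, and
// compress() renumbers the classes densely afterwards.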
std::set<unsigned> AllocatableRegs;
for (auto &RegClass : RegBank.getRegClasses()) {
if (!RegClass.Allocatable)
continue;
const CodeGenRegister::Vec &Regs = RegClass.getMembers();
if (Regs.empty())
continue;
unsigned USetID = UberSetIDs.findLeader((*Regs.begin())->EnumValue);
assert(USetID && "register number 0 is invalid");
AllocatableRegs.insert((*Regs.begin())->EnumValue);
for (auto I = std::next(Regs.begin()), E = Regs.end(); I != E; ++I) {
AllocatableRegs.insert((*I)->EnumValue);
UberSetIDs.join(USetID, (*I)->EnumValue);
}
}
// Combine non-allocatable regs.
for (const auto &Reg : Registers) {
unsigned RegNum = Reg.EnumValue;
if (AllocatableRegs.count(RegNum))
continue;
UberSetIDs.join(0, RegNum);
}
UberSetIDs.compress();
// Make the first UberSet a special unallocatable set.
unsigned ZeroID = UberSetIDs[0];
// Insert Registers into the UberSets formed by union-find.
// Do not resize after this.
UberSets.resize(UberSetIDs.getNumClasses());
unsigned i = 0;
for (const CodeGenRegister &Reg : Registers) {
unsigned USetID = UberSetIDs[Reg.EnumValue];
if (!USetID)
USetID = ZeroID;
else if (USetID == ZeroID)
USetID = 0;
UberRegSet *USet = &UberSets[USetID];
USet->Regs.push_back(&Reg);
sortAndUniqueRegisters(USet->Regs);
RegSets[i++] = USet;
}
}
// Recompute each UberSet weight after changing unit weights.
static void computeUberWeights(std::vector<UberRegSet> &UberSets,
CodeGenRegBank &RegBank) {
// Skip the first unallocatable set.
for (std::vector<UberRegSet>::iterator I = std::next(UberSets.begin()),
E = UberSets.end(); I != E; ++I) {
// Initialize all unit weights in this set, and remember the max units/reg.
const CodeGenRegister *Reg = nullptr;
unsigned MaxWeight = 0, Weight = 0;
for (RegUnitIterator UnitI(I->Regs); UnitI.isValid(); ++UnitI) {
if (Reg != UnitI.getReg()) {
if (Weight > MaxWeight)
MaxWeight = Weight;
Reg = UnitI.getReg();
Weight = 0;
}
unsigned UWeight = RegBank.getRegUnit(*UnitI).Weight;
if (!UWeight) {
UWeight = 1;
RegBank.increaseRegUnitWeight(*UnitI, UWeight);
}
Weight += UWeight;
}
if (Weight > MaxWeight)
MaxWeight = Weight;
if (I->Weight != MaxWeight) {
DEBUG(
dbgs() << "UberSet " << I - UberSets.begin() << " Weight " << MaxWeight;
for (auto &Unit : I->Regs)
dbgs() << " " << Unit->getName();
dbgs() << "\n");
// Update the set weight.
I->Weight = MaxWeight;
}
// Find singular determinants.
for (const auto R : I->Regs) {
if (R->getRegUnits().count() == 1 && R->getWeight(RegBank) == I->Weight) {
I->SingularDeterminants |= R->getRegUnits();
}
}
}
}
// normalizeWeight is a computeRegUnitWeights helper that adjusts the weight of
// a register and its subregisters so that they have the same weight as their
// UberSet. Self-recursion processes the subregister tree in postorder so
// subregisters are normalized first.
//
// Side effects:
// - creates new adopted register units
// - causes superregisters to inherit adopted units
// - increases the weight of "singular" units
// - induces recomputation of UberWeights.
static bool normalizeWeight(CodeGenRegister *Reg,
std::vector<UberRegSet> &UberSets,
std::vector<UberRegSet*> &RegSets,
SparseBitVector<> &NormalRegs,
CodeGenRegister::RegUnitList &NormalUnits,
CodeGenRegBank &RegBank) {
if (NormalRegs.test(Reg->EnumValue))
return false;
NormalRegs.set(Reg->EnumValue);
bool Changed = false;
const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator SRI = SRM.begin(),
SRE = SRM.end(); SRI != SRE; ++SRI) {
if (SRI->second == Reg)
continue; // self-cycles happen
Changed |= normalizeWeight(SRI->second, UberSets, RegSets,
NormalRegs, NormalUnits, RegBank);
}
// Postorder register normalization.
// Inherit register units newly adopted by subregisters.
if (Reg->inheritRegUnits(RegBank))
computeUberWeights(UberSets, RegBank);
// Check if this register is too skinny for its UberRegSet.
UberRegSet *UberSet = RegSets[RegBank.getRegIndex(Reg)];
unsigned RegWeight = Reg->getWeight(RegBank);
if (UberSet->Weight > RegWeight) {
// A register unit's weight can be adjusted only if it is the singular unit
// for this register, has not been used to normalize a subregister's set,
// and has not already been used to singularly determine this UberRegSet.
unsigned AdjustUnit = *Reg->getRegUnits().begin();
if (Reg->getRegUnits().count() != 1
|| hasRegUnit(NormalUnits, AdjustUnit)
|| hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
// We don't have an adjustable unit, so adopt a new one.
AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
Reg->adoptRegUnit(AdjustUnit);
// Adopting a unit does not immediately require recomputing set weights.
}
else {
// Adjust the existing single unit.
RegBank.increaseRegUnitWeight(AdjustUnit, UberSet->Weight - RegWeight);
// The unit may be shared among sets and registers within this set.
computeUberWeights(UberSets, RegBank);
}
Changed = true;
}
// Mark these units normalized so superregisters can't change their weights.
NormalUnits |= Reg->getRegUnits();
return Changed;
}
// Compute a weight for each register unit created during getSubRegs.
//
// The goal is that two registers in the same class will have the same weight,
// where each register's weight is defined as sum of its units' weights.
void CodeGenRegBank::computeRegUnitWeights() {
std::vector<UberRegSet> UberSets;
std::vector<UberRegSet*> RegSets(Registers.size());
computeUberSets(UberSets, RegSets, *this);
// UberSets and RegSets are now immutable.
computeUberWeights(UberSets, *this);
// Iterate over each Register, normalizing the unit weights until reaching
// a fix point.
unsigned NumIters = 0;
for (bool Changed = true; Changed; ++NumIters) {
assert(NumIters <= NumNativeRegUnits && "Runaway register unit weights");
Changed = false;
for (auto &Reg : Registers) {
CodeGenRegister::RegUnitList NormalUnits;
SparseBitVector<> NormalRegs;
Changed |= normalizeWeight(&Reg, UberSets, RegSets, NormalRegs,
NormalUnits, *this);
}
}
}
// Find a set in UniqueSets with the same elements as Set.
// Return an iterator into UniqueSets.
static std::vector<RegUnitSet>::const_iterator
findRegUnitSet(const std::vector<RegUnitSet> &UniqueSets,
const RegUnitSet &Set) {
std::vector<RegUnitSet>::const_iterator
I = UniqueSets.begin(), E = UniqueSets.end();
for(;I != E; ++I) {
if (I->Units == Set.Units)
break;
}
return I;
}
// Return true if the RUSubSet is a subset of RUSuperSet.
static bool isRegUnitSubSet(const std::vector<unsigned> &RUSubSet,
const std::vector<unsigned> &RUSuperSet) {
return std::includes(RUSuperSet.begin(), RUSuperSet.end(),
RUSubSet.begin(), RUSubSet.end());
}
/// Iteratively prune unit sets. Prune subsets that are close to the superset,
/// but with one or two registers removed. We occasionally have registers like
/// APSR and PC thrown in with the general registers. We also see many
/// special-purpose register subsets, such as tail-call and Thumb
/// encodings. Generating all possible overlapping sets is combinatorial and
/// overkill for modeling pressure. Ideally we could fix this statically in
/// tablegen by (1) having the target define register classes that only include
/// the allocatable registers and marking other classes as non-allocatable and
/// (2) having a way to mark special purpose classes as "don't-care" classes for
/// the purpose of pressure. However, we make an attempt to handle targets that
/// are not nicely defined by merging nearly identical register unit sets
/// statically. This generates smaller tables. Then, dynamically, we adjust the
/// set limit by filtering the reserved registers.
///
/// Merge sets only if the units have the same weight. For example, on ARM,
/// Q-tuples with ssub index 0 include all S regs but also include D16+. We
/// should not expand the S set to include D regs.
void CodeGenRegBank::pruneUnitSets() {
assert(RegClassUnitSets.empty() && "this invalidates RegClassUnitSets");
// Form an equivalence class of UnitSets with no significant difference.
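  // A set is subsumed (and dropped) when another set contains all of its
  // units, has at most two extra units, and its first and last units have the
  // same weight as the subset's units.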
std::vector<unsigned> SuperSetIDs;
for (unsigned SubIdx = 0, EndIdx = RegUnitSets.size();
SubIdx != EndIdx; ++SubIdx) {
const RegUnitSet &SubSet = RegUnitSets[SubIdx];
unsigned SuperIdx = 0;
for (; SuperIdx != EndIdx; ++SuperIdx) {
if (SuperIdx == SubIdx)
continue;
unsigned UnitWeight = RegUnits[SubSet.Units[0]].Weight;
const RegUnitSet &SuperSet = RegUnitSets[SuperIdx];
if (isRegUnitSubSet(SubSet.Units, SuperSet.Units)
&& (SubSet.Units.size() + 3 > SuperSet.Units.size())
&& UnitWeight == RegUnits[SuperSet.Units[0]].Weight
&& UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
<< "\n");
break;
}
}
if (SuperIdx == EndIdx)
SuperSetIDs.push_back(SubIdx);
}
// Populate PrunedUnitSets with each equivalence class's superset.
std::vector<RegUnitSet> PrunedUnitSets(SuperSetIDs.size());
for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
unsigned SuperIdx = SuperSetIDs[i];
PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
}
RegUnitSets.swap(PrunedUnitSets);
}
// Create a RegUnitSet for each RegClass that contains all units in the class
// including adopted units that are necessary to model register pressure. Then
// iteratively compute RegUnitSets such that the union of any two overlapping
// RegUnitSets is represented.
//
// RegisterInfoEmitter will map each RegClass to its RegUnitClass and any
// RegUnitSet that is a superset of that RegUnitClass.
void CodeGenRegBank::computeRegUnitSets() {
assert(RegUnitSets.empty() && "dirty RegUnitSets");
// Compute a unique RegUnitSet for each RegClass.
auto &RegClasses = getRegClasses();
for (auto &RC : RegClasses) {
if (!RC.Allocatable)
continue;
// Speculatively grow the RegUnitSets to hold the new set.
RegUnitSets.resize(RegUnitSets.size() + 1);
RegUnitSets.back().Name = RC.getName();
// Compute a sorted list of units in this class.
RC.buildRegUnitSet(RegUnitSets.back().Units);
// Find an existing RegUnitSet.
std::vector<RegUnitSet>::const_iterator SetI =
findRegUnitSet(RegUnitSets, RegUnitSets.back());
if (SetI != std::prev(RegUnitSets.end()))
RegUnitSets.pop_back();
}
DEBUG(dbgs() << "\nBefore pruning:\n";
for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
dbgs() << " " << RegUnits[U].Roots[0]->getName();
dbgs() << "\n";
});
// Iteratively prune unit sets.
pruneUnitSets();
DEBUG(dbgs() << "\nBefore union:\n";
for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
dbgs() << " " << RegUnits[U].Roots[0]->getName();
dbgs() << "\n";
}
dbgs() << "\nUnion sets:\n");
// Iterate over all unit sets, including new ones added by this loop.
unsigned NumRegUnitSubSets = RegUnitSets.size();
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
// In theory, this is combinatorial. In practice, it needs to be bounded
// by a small number of sets for regpressure to be efficient.
// If the assert is hit, we need to implement pruning.
assert(Idx < (2*NumRegUnitSubSets) && "runaway unit set inference");
// Compare new sets with all original classes.
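    // Original sets only search forward from Idx+1 so each pair is considered
    // once; sets inferred by this loop are compared against every original set.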
for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx+1;
SearchIdx != EndIdx; ++SearchIdx) {
std::set<unsigned> Intersection;
std::set_intersection(RegUnitSets[Idx].Units.begin(),
RegUnitSets[Idx].Units.end(),
RegUnitSets[SearchIdx].Units.begin(),
RegUnitSets[SearchIdx].Units.end(),
std::inserter(Intersection, Intersection.begin()));
if (Intersection.empty())
continue;
// Speculatively grow the RegUnitSets to hold the new set.
RegUnitSets.resize(RegUnitSets.size() + 1);
RegUnitSets.back().Name =
RegUnitSets[Idx].Name + "+" + RegUnitSets[SearchIdx].Name;
std::set_union(RegUnitSets[Idx].Units.begin(),
RegUnitSets[Idx].Units.end(),
RegUnitSets[SearchIdx].Units.begin(),
RegUnitSets[SearchIdx].Units.end(),
std::inserter(RegUnitSets.back().Units,
RegUnitSets.back().Units.begin()));
// Find an existing RegUnitSet, or add the union to the unique sets.
std::vector<RegUnitSet>::const_iterator SetI =
findRegUnitSet(RegUnitSets, RegUnitSets.back());
if (SetI != std::prev(RegUnitSets.end()))
RegUnitSets.pop_back();
else {
DEBUG(dbgs() << "UnitSet " << RegUnitSets.size()-1
<< " " << RegUnitSets.back().Name << ":";
for (auto &U : RegUnitSets.back().Units)
dbgs() << " " << RegUnits[U].Roots[0]->getName();
dbgs() << "\n";);
}
}
}
// Iteratively prune unit sets after inferring supersets.
pruneUnitSets();
DEBUG(dbgs() << "\n";
for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
USIdx < USEnd; ++USIdx) {
dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
<< ":";
for (auto &U : RegUnitSets[USIdx].Units)
dbgs() << " " << RegUnits[U].Roots[0]->getName();
dbgs() << "\n";
});
// For each register class, list the UnitSets that are supersets.
RegClassUnitSets.resize(RegClasses.size());
int RCIdx = -1;
for (auto &RC : RegClasses) {
++RCIdx;
if (!RC.Allocatable)
continue;
// Recompute the sorted list of units in this class.
std::vector<unsigned> RCRegUnits;
RC.buildRegUnitSet(RCRegUnits);
// Don't increase pressure for unallocatable regclasses.
if (RCRegUnits.empty())
continue;
DEBUG(dbgs() << "RC " << RC.getName() << " Units: \n";
for (auto &U : RCRegUnits)
dbgs() << RegUnits[U].getRoots()[0]->getName() << " ";
dbgs() << "\n UnitSetIDs:");
// Find all supersets.
for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
USIdx != USEnd; ++USIdx) {
if (isRegUnitSubSet(RCRegUnits, RegUnitSets[USIdx].Units)) {
DEBUG(dbgs() << " " << USIdx);
RegClassUnitSets[RCIdx].push_back(USIdx);
}
}
DEBUG(dbgs() << "\n");
assert(!RegClassUnitSets[RCIdx].empty() && "missing unit set for regclass");
}
// For each register unit, ensure that we have the list of UnitSets that
// contain the unit. Normally, this matches an existing list of UnitSets for a
// register class. If not, we create a new entry in RegClassUnitSets as a
// "fake" register class.
for (unsigned UnitIdx = 0, UnitEnd = NumNativeRegUnits;
UnitIdx < UnitEnd; ++UnitIdx) {
std::vector<unsigned> RUSets;
for (unsigned i = 0, e = RegUnitSets.size(); i != e; ++i) {
RegUnitSet &RUSet = RegUnitSets[i];
if (std::find(RUSet.Units.begin(), RUSet.Units.end(), UnitIdx)
== RUSet.Units.end())
continue;
RUSets.push_back(i);
}
unsigned RCUnitSetsIdx = 0;
for (unsigned e = RegClassUnitSets.size();
RCUnitSetsIdx != e; ++RCUnitSetsIdx) {
if (RegClassUnitSets[RCUnitSetsIdx] == RUSets) {
break;
}
}
RegUnits[UnitIdx].RegClassUnitSetsIdx = RCUnitSetsIdx;
if (RCUnitSetsIdx == RegClassUnitSets.size()) {
// Create a new list of UnitSets as a "fake" register class.
RegClassUnitSets.resize(RCUnitSetsIdx + 1);
RegClassUnitSets[RCUnitSetsIdx].swap(RUSets);
}
}
}
void CodeGenRegBank::computeRegUnitLaneMasks() {
for (auto &Register : Registers) {
// Create an initial lane mask for all register units.
const auto &RegUnits = Register.getRegUnits();
CodeGenRegister::RegUnitLaneMaskList RegUnitLaneMasks(RegUnits.count(), 0);
// Iterate through SubRegisters.
typedef CodeGenRegister::SubRegMap SubRegMap;
const SubRegMap &SubRegs = Register.getSubRegs();
for (SubRegMap::const_iterator S = SubRegs.begin(),
SE = SubRegs.end(); S != SE; ++S) {
CodeGenRegister *SubReg = S->second;
      // Ignore non-leaf subregisters; their lane masks are fully covered by
      // the leaf subregisters anyway.
if (SubReg->getSubRegs().size() != 0)
continue;
CodeGenSubRegIndex *SubRegIndex = S->first;
const CodeGenRegister *SubRegister = S->second;
unsigned LaneMask = SubRegIndex->LaneMask;
// Distribute LaneMask to Register Units touched.
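      // Find the position of each sub-register unit in this register's own
      // unit list and merge the sub-register index's lane mask there.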
for (unsigned SUI : SubRegister->getRegUnits()) {
bool Found = false;
unsigned u = 0;
for (unsigned RU : RegUnits) {
if (SUI == RU) {
RegUnitLaneMasks[u] |= LaneMask;
assert(!Found);
Found = true;
}
++u;
}
(void)Found;
assert(Found);
}
}
Register.setRegUnitLaneMasks(RegUnitLaneMasks);
}
}
void CodeGenRegBank::computeDerivedInfo() {
computeComposites();
computeSubRegLaneMasks();
// Compute a weight for each register unit created during getSubRegs.
// This may create adopted register units (with unit # >= NumNativeRegUnits).
computeRegUnitWeights();
// Compute a unique set of RegUnitSets. One for each RegClass and inferred
// supersets for the union of overlapping sets.
computeRegUnitSets();
computeRegUnitLaneMasks();
// Compute register class HasDisjunctSubRegs flag.
for (CodeGenRegisterClass &RC : RegClasses) {
RC.HasDisjunctSubRegs = false;
for (const CodeGenRegister *Reg : RC.getMembers())
RC.HasDisjunctSubRegs |= Reg->HasDisjunctSubRegs;
}
// Get the weight of each set.
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
RegUnitSets[Idx].Weight = getRegUnitSetWeight(RegUnitSets[Idx].Units);
// Find the order of each set.
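  // RegUnitSetOrder lists the set IDs sorted by increasing unit count; each
  // set's Order field records its position in that sorted sequence.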
RegUnitSetOrder.reserve(RegUnitSets.size());
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
RegUnitSetOrder.push_back(Idx);
std::stable_sort(RegUnitSetOrder.begin(), RegUnitSetOrder.end(),
[this](unsigned ID1, unsigned ID2) {
return getRegPressureSet(ID1).Units.size() <
getRegPressureSet(ID2).Units.size();
});
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
}
}
//
// Synthesize missing register class intersections.
//
// Make sure that sub-classes of RC exist such that getCommonSubClass(RC, X)
// returns a maximal register class for all X.
//
void CodeGenRegBank::inferCommonSubClass(CodeGenRegisterClass *RC) {
assert(!RegClasses.empty());
// Stash the iterator to the last element so that this loop doesn't visit
// elements added by the getOrCreateSubClass call within it.
for (auto I = RegClasses.begin(), E = std::prev(RegClasses.end());
I != std::next(E); ++I) {
CodeGenRegisterClass *RC1 = RC;
CodeGenRegisterClass *RC2 = &*I;
if (RC1 == RC2)
continue;
// Compute the set intersection of RC1 and RC2.
const CodeGenRegister::Vec &Memb1 = RC1->getMembers();
const CodeGenRegister::Vec &Memb2 = RC2->getMembers();
CodeGenRegister::Vec Intersection;
std::set_intersection(
Memb1.begin(), Memb1.end(), Memb2.begin(), Memb2.end(),
std::inserter(Intersection, Intersection.begin()), deref<llvm::less>());
// Skip disjoint class pairs.
if (Intersection.empty())
continue;
// If RC1 and RC2 have different spill sizes or alignments, use the
// larger size for sub-classing. If they are equal, prefer RC1.
if (RC2->SpillSize > RC1->SpillSize ||
(RC2->SpillSize == RC1->SpillSize &&
RC2->SpillAlignment > RC1->SpillAlignment))
std::swap(RC1, RC2);
getOrCreateSubClass(RC1, &Intersection,
RC1->getName() + "_and_" + RC2->getName());
}
}
//
// Synthesize missing sub-classes for getSubClassWithSubReg().
//
// Make sure that the set of registers in RC with a given SubIdx sub-register
// form a register class. Update RC->SubClassWithSubReg.
//
void CodeGenRegBank::inferSubClassWithSubReg(CodeGenRegisterClass *RC) {
// Map SubRegIndex to set of registers in RC supporting that SubRegIndex.
typedef std::map<const CodeGenSubRegIndex *, CodeGenRegister::Vec,
deref<llvm::less>> SubReg2SetMap;
// Compute the set of registers supporting each SubRegIndex.
SubReg2SetMap SRSets;
for (const auto R : RC->getMembers()) {
const CodeGenRegister::SubRegMap &SRM = R->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
E = SRM.end(); I != E; ++I)
SRSets[I->first].push_back(R);
}
for (auto I : SRSets)
sortAndUniqueRegisters(I.second);
// Find matching classes for all SRSets entries. Iterate in SubRegIndex
// numerical order to visit synthetic indices last.
for (const auto &SubIdx : SubRegIndices) {
SubReg2SetMap::const_iterator I = SRSets.find(&SubIdx);
// Unsupported SubRegIndex. Skip it.
if (I == SRSets.end())
continue;
// In most cases, all RC registers support the SubRegIndex.
if (I->second.size() == RC->getMembers().size()) {
RC->setSubClassWithSubReg(&SubIdx, RC);
continue;
}
// This is a real subset. See if we have a matching class.
CodeGenRegisterClass *SubRC =
getOrCreateSubClass(RC, &I->second,
RC->getName() + "_with_" + I->first->getName());
RC->setSubClassWithSubReg(&SubIdx, SubRC);
}
}
//
// Synthesize missing sub-classes of RC for getMatchingSuperRegClass().
//
// Create sub-classes of RC such that getMatchingSuperRegClass(RC, SubIdx, X)
// has a maximal result for any SubIdx and any X >= FirstSubRegRC.
//
void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
std::list<CodeGenRegisterClass>::iterator FirstSubRegRC) {
SmallVector<std::pair<const CodeGenRegister*,
const CodeGenRegister*>, 16> SSPairs;
BitVector TopoSigs(getNumTopoSigs());
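  // For each SubIdx, TopoSigs records the topological signatures of the
  // sub-registers reached through it, so candidate classes sharing no
  // signature can be skipped cheaply.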
// Iterate in SubRegIndex numerical order to visit synthetic indices last.
for (auto &SubIdx : SubRegIndices) {
// Skip indexes that aren't fully supported by RC's registers. This was
// computed by inferSubClassWithSubReg() above which should have been
// called first.
if (RC->getSubClassWithSubReg(&SubIdx) != RC)
continue;
// Build list of (Super, Sub) pairs for this SubIdx.
SSPairs.clear();
TopoSigs.reset();
for (const auto Super : RC->getMembers()) {
const CodeGenRegister *Sub = Super->getSubRegs().find(&SubIdx)->second;
assert(Sub && "Missing sub-register");
SSPairs.push_back(std::make_pair(Super, Sub));
TopoSigs.set(Sub->getTopoSig());
}
// Iterate over sub-register class candidates. Ignore classes created by
// this loop. They will never be useful.
// Store an iterator to the last element (not end) so that this loop doesn't
// visit newly inserted elements.
assert(!RegClasses.empty());
for (auto I = FirstSubRegRC, E = std::prev(RegClasses.end());
I != std::next(E); ++I) {
CodeGenRegisterClass &SubRC = *I;
// Topological shortcut: SubRC members have the wrong shape.
if (!TopoSigs.anyCommon(SubRC.getTopoSigs()))
continue;
// Compute the subset of RC that maps into SubRC.
CodeGenRegister::Vec SubSetVec;
for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
if (SubRC.contains(SSPairs[i].second))
SubSetVec.push_back(SSPairs[i].first);
if (SubSetVec.empty())
continue;
// RC injects completely into SubRC.
sortAndUniqueRegisters(SubSetVec);
if (SubSetVec.size() == SSPairs.size()) {
SubRC.addSuperRegClass(&SubIdx, RC);
continue;
}
// Only a subset of RC maps into SubRC. Make sure it is represented by a
// class.
getOrCreateSubClass(RC, &SubSetVec, RC->getName() + "_with_" +
SubIdx.getName() + "_in_" +
SubRC.getName());
}
}
}
//
// Infer missing register classes.
//
void CodeGenRegBank::computeInferredRegisterClasses() {
assert(!RegClasses.empty());
// When this function is called, the register classes have not been sorted
// and assigned EnumValues yet. That means getSubClasses(),
// getSuperClasses(), and hasSubClass() functions are defunct.
// Use one-before-the-end so it doesn't move forward when new elements are
// added.
auto FirstNewRC = std::prev(RegClasses.end());
// Visit all register classes, including the ones being added by the loop.
// Watch out for iterator invalidation here.
for (auto I = RegClasses.begin(), E = RegClasses.end(); I != E; ++I) {
CodeGenRegisterClass *RC = &*I;
// Synthesize answers for getSubClassWithSubReg().
inferSubClassWithSubReg(RC);
// Synthesize answers for getCommonSubClass().
inferCommonSubClass(RC);
// Synthesize answers for getMatchingSuperRegClass().
inferMatchingSuperRegClass(RC);
// New register classes are created while this loop is running, and we need
    // to visit all of them. In particular, inferMatchingSuperRegClass needs
// to match old super-register classes with sub-register classes created
// after inferMatchingSuperRegClass was called. At this point,
// inferMatchingSuperRegClass has checked SuperRC = [0..rci] with SubRC =
// [0..FirstNewRC). We need to cover SubRC = [FirstNewRC..rci].
if (I == FirstNewRC) {
auto NextNewRC = std::prev(RegClasses.end());
for (auto I2 = RegClasses.begin(), E2 = std::next(FirstNewRC); I2 != E2;
++I2)
inferMatchingSuperRegClass(&*I2, E2);
FirstNewRC = NextNewRC;
}
}
}
/// getRegisterClassForRegister - Find the register class that contains the
/// specified physical register. If the register is not in a register class,
/// return null. If the register is in multiple classes, and the classes have a
/// superset-subset relationship and the same set of types, return the
/// superclass. Otherwise return null.
const CodeGenRegisterClass*
CodeGenRegBank::getRegClassForRegister(Record *R) {
const CodeGenRegister *Reg = getReg(R);
const CodeGenRegisterClass *FoundRC = nullptr;
for (const auto &RC : getRegClasses()) {
if (!RC.contains(Reg))
continue;
// If this is the first class that contains the register,
// make a note of it and go on to the next class.
if (!FoundRC) {
FoundRC = &RC;
continue;
}
// If a register's classes have different types, return null.
if (RC.getValueTypes() != FoundRC->getValueTypes())
return nullptr;
// Check to see if the previously found class that contains
// the register is a subclass of the current class. If so,
// prefer the superclass.
if (RC.hasSubClass(FoundRC)) {
FoundRC = &RC;
continue;
}
// Check to see if the previously found class that contains
// the register is a superclass of the current class. If so,
// prefer the superclass.
if (FoundRC->hasSubClass(&RC))
continue;
// Multiple classes, and neither is a superclass of the other.
// Return null.
return nullptr;
}
return FoundRC;
}
BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
SetVector<const CodeGenRegister*> Set;
// First add Regs with all sub-registers.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
CodeGenRegister *Reg = getReg(Regs[i]);
if (Set.insert(Reg))
// Reg is new, add all sub-registers.
// The pre-ordering is not important here.
Reg->addSubRegsPreOrder(Set, *this);
}
// Second, find all super-registers that are completely covered by the set.
for (unsigned i = 0; i != Set.size(); ++i) {
const CodeGenRegister::SuperRegList &SR = Set[i]->getSuperRegs();
for (unsigned j = 0, e = SR.size(); j != e; ++j) {
const CodeGenRegister *Super = SR[j];
if (!Super->CoveredBySubRegs || Set.count(Super))
continue;
// This new super-register is covered by its sub-registers.
bool AllSubsInSet = true;
const CodeGenRegister::SubRegMap &SRM = Super->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
E = SRM.end(); I != E; ++I)
if (!Set.count(I->second)) {
AllSubsInSet = false;
break;
}
// All sub-registers in Set, add Super as well.
// We will visit Super later to recheck its super-registers.
if (AllSubsInSet)
Set.insert(Super);
}
}
// Convert to BitVector.
BitVector BV(Registers.size() + 1);
for (unsigned i = 0, e = Set.size(); i != e; ++i)
BV.set(Set[i]->EnumValue);
return BV;
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenInstruction.cpp | //===- CodeGenInstruction.cpp - CodeGen Instruction Class Wrapper ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CodeGenInstruction class.
//
//===----------------------------------------------------------------------===//
#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <set>
using namespace llvm;
//===----------------------------------------------------------------------===//
// CGIOperandList Implementation
//===----------------------------------------------------------------------===//
CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
isPredicable = false;
hasOptionalDef = false;
isVariadic = false;
DagInit *OutDI = R->getValueAsDag("OutOperandList");
if (DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
if (Init->getDef()->getName() != "outs")
PrintFatalError(R->getName() + ": invalid def name for output list: use 'outs'");
} else
PrintFatalError(R->getName() + ": invalid output list: use 'outs'");
NumDefs = OutDI->getNumArgs();
DagInit *InDI = R->getValueAsDag("InOperandList");
if (DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
if (Init->getDef()->getName() != "ins")
PrintFatalError(R->getName() + ": invalid def name for input list: use 'ins'");
} else
PrintFatalError(R->getName() + ": invalid input list: use 'ins'");
unsigned MIOperandNo = 0;
std::set<std::string> OperandNames;
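  // Walk the output operands followed by the input operands. MIOperandNo is
  // the flattened MachineInstr operand index; a single declared operand may
  // expand into several MI operands via MIOperandInfo.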
for (unsigned i = 0, e = InDI->getNumArgs()+OutDI->getNumArgs(); i != e; ++i){
Init *ArgInit;
std::string ArgName;
if (i < NumDefs) {
ArgInit = OutDI->getArg(i);
ArgName = OutDI->getArgName(i);
} else {
ArgInit = InDI->getArg(i-NumDefs);
ArgName = InDI->getArgName(i-NumDefs);
}
DefInit *Arg = dyn_cast<DefInit>(ArgInit);
if (!Arg)
PrintFatalError("Illegal operand for the '" + R->getName() + "' instruction!");
Record *Rec = Arg->getDef();
std::string PrintMethod = "printOperand";
std::string EncoderMethod;
std::string OperandType = "OPERAND_UNKNOWN";
std::string OperandNamespace = "MCOI";
unsigned NumOps = 1;
DagInit *MIOpInfo = nullptr;
if (Rec->isSubClassOf("RegisterOperand")) {
PrintMethod = Rec->getValueAsString("PrintMethod");
OperandType = Rec->getValueAsString("OperandType");
OperandNamespace = Rec->getValueAsString("OperandNamespace");
} else if (Rec->isSubClassOf("Operand")) {
PrintMethod = Rec->getValueAsString("PrintMethod");
OperandType = Rec->getValueAsString("OperandType");
// If there is an explicit encoder method, use it.
EncoderMethod = Rec->getValueAsString("EncoderMethod");
MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
// Verify that MIOpInfo has an 'ops' root value.
if (!isa<DefInit>(MIOpInfo->getOperator()) ||
cast<DefInit>(MIOpInfo->getOperator())->getDef()->getName() != "ops")
PrintFatalError("Bad value for MIOperandInfo in operand '" + Rec->getName() +
"'\n");
// If we have MIOpInfo, then we have #operands equal to number of entries
// in MIOperandInfo.
if (unsigned NumArgs = MIOpInfo->getNumArgs())
NumOps = NumArgs;
if (Rec->isSubClassOf("PredicateOp"))
isPredicable = true;
else if (Rec->isSubClassOf("OptionalDefOperand"))
hasOptionalDef = true;
} else if (Rec->getName() == "variable_ops") {
isVariadic = true;
continue;
} else if (Rec->isSubClassOf("RegisterClass")) {
OperandType = "OPERAND_REGISTER";
} else if (!Rec->isSubClassOf("PointerLikeRegClass") &&
!Rec->isSubClassOf("unknown_class"))
PrintFatalError("Unknown operand class '" + Rec->getName() +
"' in '" + R->getName() + "' instruction!");
// Check that the operand has a name and that it's unique.
if (ArgName.empty())
PrintFatalError("In instruction '" + R->getName() + "', operand #" +
Twine(i) + " has no name!");
if (!OperandNames.insert(ArgName).second)
PrintFatalError("In instruction '" + R->getName() + "', operand #" +
Twine(i) + " has the same name as a previous operand!");
OperandList.emplace_back(Rec, ArgName, PrintMethod, EncoderMethod,
OperandNamespace + "::" + OperandType, MIOperandNo,
NumOps, MIOpInfo);
MIOperandNo += NumOps;
}
// Make sure the constraints list for each operand is large enough to hold
// constraint info, even if none is present.
for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
OperandList[i].Constraints.resize(OperandList[i].MINumOperands);
}
/// getOperandNamed - Return the index of the operand with the specified
/// non-empty name. If the instruction does not have an operand with the
/// specified name, abort.
///
unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
unsigned OpIdx;
if (hasOperandNamed(Name, OpIdx)) return OpIdx;
PrintFatalError("'" + TheDef->getName() +
"' does not have an operand named '$" + Name + "'!");
}
/// hasOperandNamed - Query whether the instruction has an operand of the
/// given name. If so, return true and set OpIdx to the index of the
/// operand. Otherwise, return false.
bool CGIOperandList::hasOperandNamed(StringRef Name, unsigned &OpIdx) const {
assert(!Name.empty() && "Cannot search for operand with no name!");
for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
if (OperandList[i].Name == Name) {
OpIdx = i;
return true;
}
return false;
}
std::pair<unsigned,unsigned>
CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
if (Op.empty() || Op[0] != '$')
PrintFatalError(TheDef->getName() + ": Illegal operand name: '" + Op + "'");
std::string OpName = Op.substr(1);
std::string SubOpName;
// Check to see if this is $foo.bar.
std::string::size_type DotIdx = OpName.find_first_of(".");
if (DotIdx != std::string::npos) {
SubOpName = OpName.substr(DotIdx+1);
if (SubOpName.empty())
PrintFatalError(TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'");
OpName = OpName.substr(0, DotIdx);
}
unsigned OpIdx = getOperandNamed(OpName);
if (SubOpName.empty()) { // If no suboperand name was specified:
// If one was needed, throw.
if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
SubOpName.empty())
PrintFatalError(TheDef->getName() + ": Illegal to refer to"
" whole operand part of complex operand '" + Op + "'");
// Otherwise, return the operand.
return std::make_pair(OpIdx, 0U);
}
// Find the suboperand number involved.
DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
if (!MIOpInfo)
PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
// Find the operand with the right name.
for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
if (MIOpInfo->getArgName(i) == SubOpName)
return std::make_pair(OpIdx, i);
// Otherwise, didn't find it!
PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'");
return std::make_pair(0U, 0U);
}
static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
// EARLY_CLOBBER: @early $reg
std::string::size_type wpos = CStr.find_first_of(" \t");
std::string::size_type start = CStr.find_first_not_of(" \t");
std::string Tok = CStr.substr(start, wpos - start);
if (Tok == "@earlyclobber") {
std::string Name = CStr.substr(wpos+1);
wpos = Name.find_first_not_of(" \t");
if (wpos == std::string::npos)
PrintFatalError("Illegal format for @earlyclobber constraint: '" + CStr + "'");
Name = Name.substr(wpos);
std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
// Build the string for the operand
if (!Ops[Op.first].Constraints[Op.second].isNone())
PrintFatalError("Operand '" + Name + "' cannot have multiple constraints!");
Ops[Op.first].Constraints[Op.second] =
CGIOperandList::ConstraintInfo::getEarlyClobber();
return;
}
// Only other constraint is "TIED_TO" for now.
std::string::size_type pos = CStr.find_first_of('=');
assert(pos != std::string::npos && "Unrecognized constraint");
start = CStr.find_first_not_of(" \t");
std::string Name = CStr.substr(start, pos - start);
// TIED_TO: $src1 = $dst
wpos = Name.find_first_of(" \t");
if (wpos == std::string::npos)
PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
std::string DestOpName = Name.substr(0, wpos);
std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
Name = CStr.substr(pos+1);
wpos = Name.find_first_not_of(" \t");
if (wpos == std::string::npos)
PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'");
std::string SrcOpName = Name.substr(wpos);
std::pair<unsigned,unsigned> SrcOp = Ops.ParseOperandName(SrcOpName, false);
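  // Canonicalize so DestOp is the later of the two operands; the constraint is
  // stored on DestOp and records the flattened number of the earlier SrcOp.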
if (SrcOp > DestOp) {
std::swap(SrcOp, DestOp);
std::swap(SrcOpName, DestOpName);
}
unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
PrintFatalError("Operand '" + DestOpName +
"' cannot have multiple constraints!");
Ops[DestOp.first].Constraints[DestOp.second] =
CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
}
static void ParseConstraints(const std::string &CStr, CGIOperandList &Ops) {
if (CStr.empty()) return;
const std::string delims(",");
std::string::size_type bidx, eidx;
bidx = CStr.find_first_not_of(delims);
while (bidx != std::string::npos) {
eidx = CStr.find_first_of(delims, bidx);
if (eidx == std::string::npos)
eidx = CStr.length();
ParseConstraint(CStr.substr(bidx, eidx - bidx), Ops);
bidx = CStr.find_first_not_of(delims, eidx);
}
}
void CGIOperandList::ProcessDisableEncoding(std::string DisableEncoding) {
while (1) {
std::pair<StringRef, StringRef> P = getToken(DisableEncoding, " ,\t");
std::string OpName = P.first;
DisableEncoding = P.second;
if (OpName.empty()) break;
// Figure out which operand this is.
std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false);
// Mark the operand as not-to-be encoded.
if (Op.second >= OperandList[Op.first].DoNotEncode.size())
OperandList[Op.first].DoNotEncode.resize(Op.second+1);
OperandList[Op.first].DoNotEncode[Op.second] = true;
}
}
//===----------------------------------------------------------------------===//
// CodeGenInstruction Implementation
//===----------------------------------------------------------------------===//
CodeGenInstruction::CodeGenInstruction(Record *R)
: TheDef(R), Operands(R), InferredFrom(nullptr) {
Namespace = R->getValueAsString("Namespace");
AsmString = R->getValueAsString("AsmString");
isReturn = R->getValueAsBit("isReturn");
isBranch = R->getValueAsBit("isBranch");
isIndirectBranch = R->getValueAsBit("isIndirectBranch");
isCompare = R->getValueAsBit("isCompare");
isMoveImm = R->getValueAsBit("isMoveImm");
isBitcast = R->getValueAsBit("isBitcast");
isSelect = R->getValueAsBit("isSelect");
isBarrier = R->getValueAsBit("isBarrier");
isCall = R->getValueAsBit("isCall");
canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
isPredicable = Operands.isPredicable || R->getValueAsBit("isPredicable");
isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
isCommutable = R->getValueAsBit("isCommutable");
isTerminator = R->getValueAsBit("isTerminator");
isReMaterializable = R->getValueAsBit("isReMaterializable");
hasDelaySlot = R->getValueAsBit("hasDelaySlot");
usesCustomInserter = R->getValueAsBit("usesCustomInserter");
hasPostISelHook = R->getValueAsBit("hasPostISelHook");
hasCtrlDep = R->getValueAsBit("hasCtrlDep");
isNotDuplicable = R->getValueAsBit("isNotDuplicable");
isRegSequence = R->getValueAsBit("isRegSequence");
isExtractSubreg = R->getValueAsBit("isExtractSubreg");
isInsertSubreg = R->getValueAsBit("isInsertSubreg");
isConvergent = R->getValueAsBit("isConvergent");
bool Unset;
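  // These flags may be left unset in the .td file; remember that separately so
  // later analysis can distinguish an explicit 0 from an unspecified value.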
mayLoad = R->getValueAsBitOrUnset("mayLoad", Unset);
mayLoad_Unset = Unset;
mayStore = R->getValueAsBitOrUnset("mayStore", Unset);
mayStore_Unset = Unset;
hasSideEffects = R->getValueAsBitOrUnset("hasSideEffects", Unset);
hasSideEffects_Unset = Unset;
isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
isCodeGenOnly = R->getValueAsBit("isCodeGenOnly");
isPseudo = R->getValueAsBit("isPseudo");
ImplicitDefs = R->getValueAsListOfDefs("Defs");
ImplicitUses = R->getValueAsListOfDefs("Uses");
// Parse Constraints.
ParseConstraints(R->getValueAsString("Constraints"), Operands);
// Parse the DisableEncoding field.
Operands.ProcessDisableEncoding(R->getValueAsString("DisableEncoding"));
// First check for a ComplexDeprecationPredicate.
if (R->getValue("ComplexDeprecationPredicate")) {
HasComplexDeprecationPredicate = true;
DeprecatedReason = R->getValueAsString("ComplexDeprecationPredicate");
} else if (RecordVal *Dep = R->getValue("DeprecatedFeatureMask")) {
// Check if we have a Subtarget feature mask.
HasComplexDeprecationPredicate = false;
DeprecatedReason = Dep->getValue()->getAsString();
} else {
// This instruction isn't deprecated.
HasComplexDeprecationPredicate = false;
DeprecatedReason = "";
}
}
/// HasOneImplicitDefWithKnownVT - If the instruction has at least one
/// implicit def and it has a known VT, return the VT, otherwise return
/// MVT::Other.
MVT::SimpleValueType CodeGenInstruction::
HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
if (ImplicitDefs.empty()) return MVT::Other;
// Check to see if the first implicit def has a resolvable type.
Record *FirstImplicitDef = ImplicitDefs[0];
assert(FirstImplicitDef->isSubClassOf("Register"));
const std::vector<MVT::SimpleValueType> &RegVTs =
TargetInfo.getRegisterVTs(FirstImplicitDef);
if (RegVTs.size() == 1)
return RegVTs[0];
return MVT::Other;
}
/// FlattenAsmStringVariants - Flatten the specified AsmString to only
/// include text from the specified variant, returning the new string.
std::string CodeGenInstruction::
FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
std::string Res = "";
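  // AsmString may contain {a|b|c} variant groups. Copy text outside the braces
  // verbatim and keep only the Variant'th '|'-separated alternative inside
  // each group; braces escaped with '\' or preceded by '$' are left alone.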
for (;;) {
// Find the start of the next variant string.
size_t VariantsStart = 0;
for (size_t e = Cur.size(); VariantsStart != e; ++VariantsStart)
if (Cur[VariantsStart] == '{' &&
(VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
Cur[VariantsStart-1] != '\\')))
break;
// Add the prefix to the result.
Res += Cur.slice(0, VariantsStart);
if (VariantsStart == Cur.size())
break;
++VariantsStart; // Skip the '{'.
// Scan to the end of the variants string.
size_t VariantsEnd = VariantsStart;
unsigned NestedBraces = 1;
for (size_t e = Cur.size(); VariantsEnd != e; ++VariantsEnd) {
if (Cur[VariantsEnd] == '}' && Cur[VariantsEnd-1] != '\\') {
if (--NestedBraces == 0)
break;
} else if (Cur[VariantsEnd] == '{')
++NestedBraces;
}
// Select the Nth variant (or empty).
StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
for (unsigned i = 0; i != Variant; ++i)
Selection = Selection.split('|').second;
Res += Selection.split('|').first;
assert(VariantsEnd != Cur.size() &&
"Unterminated variants in assembly string!");
Cur = Cur.substr(VariantsEnd + 1);
}
return Res;
}
//===----------------------------------------------------------------------===//
/// CodeGenInstAlias Implementation
//===----------------------------------------------------------------------===//
/// tryAliasOpMatch - This is a helper function for the CodeGenInstAlias
/// constructor. It checks if an argument in an InstAlias pattern matches
/// the corresponding operand of the instruction. It returns true on a
/// successful match, with ResOp set to the result operand to be used.
bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
Record *InstOpRec, bool hasSubOps,
ArrayRef<SMLoc> Loc, CodeGenTarget &T,
ResultOperand &ResOp) {
Init *Arg = Result->getArg(AliasOpNo);
DefInit *ADI = dyn_cast<DefInit>(Arg);
Record *ResultRecord = ADI ? ADI->getDef() : nullptr;
if (ADI && ADI->getDef() == InstOpRec) {
// If the operand is a record, it must have a name, and the record type
// must match up with the instruction's argument type.
if (Result->getArgName(AliasOpNo).empty())
PrintFatalError(Loc, "result argument #" + Twine(AliasOpNo) +
" must have a name!");
ResOp = ResultOperand(Result->getArgName(AliasOpNo), ResultRecord);
return true;
}
// For register operands, the source register class can be a subclass
// of the instruction register class, not just an exact match.
if (InstOpRec->isSubClassOf("RegisterOperand"))
InstOpRec = InstOpRec->getValueAsDef("RegClass");
if (ADI && ADI->getDef()->isSubClassOf("RegisterOperand"))
ADI = ADI->getDef()->getValueAsDef("RegClass")->getDefInit();
if (ADI && ADI->getDef()->isSubClassOf("RegisterClass")) {
if (!InstOpRec->isSubClassOf("RegisterClass"))
return false;
if (!T.getRegisterClass(InstOpRec)
.hasSubClass(&T.getRegisterClass(ADI->getDef())))
return false;
ResOp = ResultOperand(Result->getArgName(AliasOpNo), ResultRecord);
return true;
}
// Handle explicit registers.
if (ADI && ADI->getDef()->isSubClassOf("Register")) {
if (InstOpRec->isSubClassOf("OptionalDefOperand")) {
DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
// The operand info should only have a single (register) entry. We
      // want its register class.
InstOpRec = cast<DefInit>(DI->getArg(0))->getDef();
}
if (!InstOpRec->isSubClassOf("RegisterClass"))
return false;
if (!T.getRegisterClass(InstOpRec)
.contains(T.getRegBank().getReg(ADI->getDef())))
PrintFatalError(Loc, "fixed register " + ADI->getDef()->getName() +
" is not a member of the " + InstOpRec->getName() +
" register class!");
if (!Result->getArgName(AliasOpNo).empty())
PrintFatalError(Loc, "result fixed register argument must "
"not have a name!");
ResOp = ResultOperand(ResultRecord);
return true;
}
// Handle "zero_reg" for optional def operands.
if (ADI && ADI->getDef()->getName() == "zero_reg") {
// Check if this is an optional def.
// Tied operands where the source is a sub-operand of a complex operand
// need to represent both operands in the alias destination instruction.
// Allow zero_reg for the tied portion. This can and should go away once
// the MC representation of things doesn't use tied operands at all.
//if (!InstOpRec->isSubClassOf("OptionalDefOperand"))
// throw TGError(Loc, "reg0 used for result that is not an "
// "OptionalDefOperand!");
ResOp = ResultOperand(static_cast<Record*>(nullptr));
return true;
}
// Literal integers.
if (IntInit *II = dyn_cast<IntInit>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
// Integer arguments can't have names.
if (!Result->getArgName(AliasOpNo).empty())
PrintFatalError(Loc, "result argument #" + Twine(AliasOpNo) +
" must not have a name!");
ResOp = ResultOperand(II->getValue());
return true;
}
// Bits<n> (also used for 0bxx literals)
if (BitsInit *BI = dyn_cast<BitsInit>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
if (!BI->isComplete())
return false;
// Convert the bits init to an integer and use that for the result.
IntInit *II =
dyn_cast_or_null<IntInit>(BI->convertInitializerTo(IntRecTy::get()));
if (!II)
return false;
ResOp = ResultOperand(II->getValue());
return true;
}
// If both are Operands with the same MVT, allow the conversion. It's
// up to the user to make sure the values are appropriate, just like
// for isel Pat's.
if (InstOpRec->isSubClassOf("Operand") && ADI &&
ADI->getDef()->isSubClassOf("Operand")) {
// FIXME: What other attributes should we check here? Identical
// MIOperandInfo perhaps?
if (InstOpRec->getValueInit("Type") != ADI->getDef()->getValueInit("Type"))
return false;
ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
return true;
}
return false;
}
unsigned CodeGenInstAlias::ResultOperand::getMINumOperands() const {
if (!isRecord())
return 1;
Record *Rec = getRecord();
if (!Rec->isSubClassOf("Operand"))
return 1;
DagInit *MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
if (MIOpInfo->getNumArgs() == 0) {
// Unspecified, so it defaults to 1
return 1;
}
return MIOpInfo->getNumArgs();
}
CodeGenInstAlias::CodeGenInstAlias(Record *R, unsigned Variant,
CodeGenTarget &T)
: TheDef(R) {
Result = R->getValueAsDag("ResultInst");
AsmString = R->getValueAsString("AsmString");
AsmString = CodeGenInstruction::FlattenAsmStringVariants(AsmString, Variant);
// Verify that the root of the result is an instruction.
DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
if (!DI || !DI->getDef()->isSubClassOf("Instruction"))
PrintFatalError(R->getLoc(),
"result of inst alias should be an instruction");
ResultInst = &T.getInstruction(DI->getDef());
// NameClass - If argument names are repeated, we need to verify they have
// the same class.
StringMap<Record*> NameClass;
for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
if (!ADI || Result->getArgName(i).empty())
continue;
// Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
// $foo can exist multiple times in the result list, but it must have the
// same type.
Record *&Entry = NameClass[Result->getArgName(i)];
if (Entry && Entry != ADI->getDef())
PrintFatalError(R->getLoc(), "result value $" + Result->getArgName(i) +
" is both " + Entry->getName() + " and " +
ADI->getDef()->getName() + "!");
Entry = ADI->getDef();
}
// Decode and validate the arguments of the result.
unsigned AliasOpNo = 0;
for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
// Tied registers don't have an entry in the result dag unless they're part
    // of a complex operand, in which case we include them anyway, as we
// don't have any other way to specify the whole operand.
if (ResultInst->Operands[i].MINumOperands == 1 &&
ResultInst->Operands[i].getTiedRegister() != -1)
continue;
if (AliasOpNo >= Result->getNumArgs())
PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
Record *InstOpRec = ResultInst->Operands[i].Rec;
unsigned NumSubOps = ResultInst->Operands[i].MINumOperands;
ResultOperand ResOp(static_cast<int64_t>(0));
if (tryAliasOpMatch(Result, AliasOpNo, InstOpRec, (NumSubOps > 1),
R->getLoc(), T, ResOp)) {
// If this is a simple operand, or a complex operand with a custom match
      // class, then we can match it verbatim.
if (NumSubOps == 1 ||
(InstOpRec->getValue("ParserMatchClass") &&
InstOpRec->getValueAsDef("ParserMatchClass")
->getValueAsString("Name") != "Imm")) {
ResultOperands.push_back(ResOp);
ResultInstOperandIndex.push_back(std::make_pair(i, -1));
++AliasOpNo;
// Otherwise, we need to match each of the suboperands individually.
} else {
DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
// Take care to instantiate each of the suboperands with the correct
// nomenclature: $foo.bar
ResultOperands.emplace_back(Result->getArgName(AliasOpNo) + "." +
MIOI->getArgName(SubOp),
SubRec);
ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
}
++AliasOpNo;
}
continue;
}
// If the argument did not match the instruction operand, and the operand
// is composed of multiple suboperands, try matching the suboperands.
if (NumSubOps > 1) {
DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
if (AliasOpNo >= Result->getNumArgs())
PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false,
R->getLoc(), T, ResOp)) {
ResultOperands.push_back(ResOp);
ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
++AliasOpNo;
} else {
PrintFatalError(R->getLoc(), "result argument #" + Twine(AliasOpNo) +
" does not match instruction operand class " +
(SubOp == 0 ? InstOpRec->getName() :SubRec->getName()));
}
}
continue;
}
PrintFatalError(R->getLoc(), "result argument #" + Twine(AliasOpNo) +
" does not match instruction operand class " +
InstOpRec->getName());
}
if (AliasOpNo != Result->getNumArgs())
PrintFatalError(R->getLoc(), "too many operands for instruction!");
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/RegisterInfoEmitter.cpp | //===- RegisterInfoEmitter.cpp - Generate a Register File Desc. -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting a description of a target
// register file for a code generator. It uses instances of the Register,
// RegisterAliases, and RegisterClass classes to gather this information.
//
//===----------------------------------------------------------------------===//
#include "CodeGenRegisters.h"
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Format.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <set>
#include <vector>
using namespace llvm;
namespace {
class RegisterInfoEmitter {
RecordKeeper &Records;
public:
RegisterInfoEmitter(RecordKeeper &R) : Records(R) {}
// runEnums - Print out enum values for all of the registers.
void runEnums(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
// runMCDesc - Print out MC register descriptions.
void runMCDesc(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
// runTargetHeader - Emit a header fragment for the register info emitter.
void runTargetHeader(raw_ostream &o, CodeGenTarget &Target,
CodeGenRegBank &Bank);
// runTargetDesc - Output the target register and register file descriptions.
void runTargetDesc(raw_ostream &o, CodeGenTarget &Target,
CodeGenRegBank &Bank);
// run - Output the register file description.
void run(raw_ostream &o);
private:
void EmitRegMapping(raw_ostream &o, const std::deque<CodeGenRegister> &Regs,
bool isCtor);
void EmitRegMappingTables(raw_ostream &o,
const std::deque<CodeGenRegister> &Regs,
bool isCtor);
void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
const std::string &ClassName);
void emitComposeSubRegIndices(raw_ostream &OS, CodeGenRegBank &RegBank,
const std::string &ClassName);
void emitComposeSubRegIndexLaneMask(raw_ostream &OS, CodeGenRegBank &RegBank,
const std::string &ClassName);
};
} // End anonymous namespace
// runEnums - Print out enum values for all of the registers.
void RegisterInfoEmitter::runEnums(raw_ostream &OS,
CodeGenTarget &Target, CodeGenRegBank &Bank) {
const auto &Registers = Bank.getRegisters();
// Register enums are stored as uint16_t in the tables. Make sure we'll fit.
assert(Registers.size() <= 0xffff && "Too many regs to fit in tables");
std::string Namespace =
Registers.front().TheDef->getValueAsString("Namespace");
emitSourceFileHeader("Target Register Enum Values", OS);
OS << "\n#ifdef GET_REGINFO_ENUM\n";
OS << "#undef GET_REGINFO_ENUM\n";
OS << "namespace llvm {\n\n";
OS << "class MCRegisterClass;\n"
<< "extern const MCRegisterClass " << Namespace
<< "MCRegisterClasses[];\n\n";
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
OS << "enum {\n NoRegister,\n";
for (const auto &Reg : Registers)
OS << " " << Reg.getName() << " = " << Reg.EnumValue << ",\n";
assert(Registers.size() == Registers.back().EnumValue &&
"Register enum value mismatch!");
OS << " NUM_TARGET_REGS \t// " << Registers.size()+1 << "\n";
OS << "};\n";
if (!Namespace.empty())
OS << "}\n";
const auto &RegisterClasses = Bank.getRegClasses();
if (!RegisterClasses.empty()) {
// RegisterClass enums are stored as uint16_t in the tables.
assert(RegisterClasses.size() <= 0xffff &&
"Too many register classes to fit in tables");
OS << "\n// Register classes\n";
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
OS << "enum {\n";
for (const auto &RC : RegisterClasses)
OS << " " << RC.getName() << "RegClassID"
<< " = " << RC.EnumValue << ",\n";
OS << "\n };\n";
if (!Namespace.empty())
OS << "}\n";
}
const std::vector<Record*> &RegAltNameIndices = Target.getRegAltNameIndices();
// If the only definition is the default NoRegAltName, we don't need to
// emit anything.
if (RegAltNameIndices.size() > 1) {
OS << "\n// Register alternate name indices\n";
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
OS << "enum {\n";
for (unsigned i = 0, e = RegAltNameIndices.size(); i != e; ++i)
OS << " " << RegAltNameIndices[i]->getName() << ",\t// " << i << "\n";
OS << " NUM_TARGET_REG_ALT_NAMES = " << RegAltNameIndices.size() << "\n";
OS << "};\n";
if (!Namespace.empty())
OS << "}\n";
}
auto &SubRegIndices = Bank.getSubRegIndices();
if (!SubRegIndices.empty()) {
OS << "\n// Subregister indices\n";
std::string Namespace = SubRegIndices.front().getNamespace();
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
OS << "enum {\n NoSubRegister,\n";
unsigned i = 0;
for (const auto &Idx : SubRegIndices)
OS << " " << Idx.getName() << ",\t// " << ++i << "\n";
OS << " NUM_TARGET_SUBREGS\n};\n";
if (!Namespace.empty())
OS << "}\n";
}
OS << "} // End llvm namespace\n";
OS << "#endif // GET_REGINFO_ENUM\n\n";
}
static void printInt(raw_ostream &OS, int Val) {
OS << Val;
}
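// Return the smallest unsigned integer type able to hold values up to Range.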
static const char *getMinimalTypeForRange(uint64_t Range) {
assert(Range < 0xFFFFFFFFULL && "Enum too large");
if (Range > 0xFFFF)
return "uint32_t";
if (Range > 0xFF)
return "uint16_t";
return "uint8_t";
}
void RegisterInfoEmitter::
EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
const std::string &ClassName) {
unsigned NumRCs = RegBank.getRegClasses().size();
unsigned NumSets = RegBank.getNumRegPressureSets();
OS << "/// Get the weight in units of pressure for this register class.\n"
<< "const RegClassWeight &" << ClassName << "::\n"
<< "getRegClassWeight(const TargetRegisterClass *RC) const {\n"
<< " static const RegClassWeight RCWeightTable[] = {\n";
for (const auto &RC : RegBank.getRegClasses()) {
const CodeGenRegister::Vec &Regs = RC.getMembers();
if (Regs.empty())
OS << " {0, 0";
else {
std::vector<unsigned> RegUnits;
RC.buildRegUnitSet(RegUnits);
OS << " {" << (*Regs.begin())->getWeight(RegBank)
<< ", " << RegBank.getRegUnitSetWeight(RegUnits);
}
OS << "}, \t// " << RC.getName() << "\n";
}
OS << " };\n"
<< " return RCWeightTable[RC->getID()];\n"
<< "}\n\n";
// Reasonable targets (not ARMv7) have unit weight for all units, so don't
// bother generating a table.
bool RegUnitsHaveUnitWeight = true;
for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
UnitIdx < UnitEnd; ++UnitIdx) {
if (RegBank.getRegUnit(UnitIdx).Weight > 1)
RegUnitsHaveUnitWeight = false;
}
OS << "/// Get the weight in units of pressure for this register unit.\n"
<< "unsigned " << ClassName << "::\n"
<< "getRegUnitWeight(unsigned RegUnit) const {\n"
<< " assert(RegUnit < " << RegBank.getNumNativeRegUnits()
<< " && \"invalid register unit\");\n";
if (!RegUnitsHaveUnitWeight) {
OS << " static const uint8_t RUWeightTable[] = {\n ";
for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
UnitIdx < UnitEnd; ++UnitIdx) {
const RegUnit &RU = RegBank.getRegUnit(UnitIdx);
assert(RU.Weight < 256 && "RegUnit too heavy");
OS << RU.Weight << ", ";
}
OS << "};\n"
<< " return RUWeightTable[RegUnit];\n";
}
else {
OS << " // All register units have unit weight.\n"
<< " return 1;\n";
}
OS << "}\n\n";
OS << "\n"
<< "// Get the number of dimensions of register pressure.\n"
<< "unsigned " << ClassName << "::getNumRegPressureSets() const {\n"
<< " return " << NumSets << ";\n}\n\n";
OS << "// Get the name of this register unit pressure set.\n"
<< "const char *" << ClassName << "::\n"
<< "getRegPressureSetName(unsigned Idx) const {\n"
<< " static const char *const PressureNameTable[] = {\n";
unsigned MaxRegUnitWeight = 0;
for (unsigned i = 0; i < NumSets; ++i ) {
const RegUnitSet &RegUnits = RegBank.getRegSetAt(i);
MaxRegUnitWeight = std::max(MaxRegUnitWeight, RegUnits.Weight);
OS << " \"" << RegUnits.Name << "\",\n";
}
OS << " nullptr };\n"
<< " return PressureNameTable[Idx];\n"
<< "}\n\n";
OS << "// Get the register unit pressure limit for this dimension.\n"
<< "// This limit must be adjusted dynamically for reserved registers.\n"
<< "unsigned " << ClassName << "::\n"
<< "getRegPressureSetLimit(const MachineFunction &MF, unsigned Idx) const {\n"
<< " static const " << getMinimalTypeForRange(MaxRegUnitWeight)
<< " PressureLimitTable[] = {\n";
for (unsigned i = 0; i < NumSets; ++i ) {
const RegUnitSet &RegUnits = RegBank.getRegSetAt(i);
OS << " " << RegUnits.Weight << ", \t// " << i << ": "
<< RegUnits.Name << "\n";
}
OS << " };\n"
<< " return PressureLimitTable[Idx];\n"
<< "}\n\n";
SequenceToOffsetTable<std::vector<int>> PSetsSeqs;
// This table may be larger than NumRCs if some register units needed a list
// of unit sets that did not correspond to a register class.
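  // PSetsSeqs deduplicates the per-class pressure-set lists so RCSetsTable
  // stores each distinct sequence only once.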
unsigned NumRCUnitSets = RegBank.getNumRegClassPressureSetLists();
std::vector<std::vector<int>> PSets(NumRCUnitSets);
for (unsigned i = 0, e = NumRCUnitSets; i != e; ++i) {
ArrayRef<unsigned> PSetIDs = RegBank.getRCPressureSetIDs(i);
PSets[i].reserve(PSetIDs.size());
for (ArrayRef<unsigned>::iterator PSetI = PSetIDs.begin(),
PSetE = PSetIDs.end(); PSetI != PSetE; ++PSetI) {
PSets[i].push_back(RegBank.getRegPressureSet(*PSetI).Order);
}
std::sort(PSets[i].begin(), PSets[i].end());
PSetsSeqs.add(PSets[i]);
}
PSetsSeqs.layout();
OS << "/// Table of pressure sets per register class or unit.\n"
<< "static const int RCSetsTable[] = {\n";
PSetsSeqs.emit(OS, printInt, "-1");
OS << "};\n\n";
OS << "/// Get the dimensions of register pressure impacted by this "
<< "register class.\n"
<< "/// Returns a -1 terminated array of pressure set IDs\n"
<< "const int* " << ClassName << "::\n"
<< "getRegClassPressureSets(const TargetRegisterClass *RC) const {\n";
OS << " static const " << getMinimalTypeForRange(PSetsSeqs.size()-1)
<< " RCSetStartTable[] = {\n ";
for (unsigned i = 0, e = NumRCs; i != e; ++i) {
OS << PSetsSeqs.get(PSets[i]) << ",";
}
OS << "};\n"
<< " return &RCSetsTable[RCSetStartTable[RC->getID()]];\n"
<< "}\n\n";
OS << "/// Get the dimensions of register pressure impacted by this "
<< "register unit.\n"
<< "/// Returns a -1 terminated array of pressure set IDs\n"
<< "const int* " << ClassName << "::\n"
<< "getRegUnitPressureSets(unsigned RegUnit) const {\n"
<< " assert(RegUnit < " << RegBank.getNumNativeRegUnits()
<< " && \"invalid register unit\");\n";
OS << " static const " << getMinimalTypeForRange(PSetsSeqs.size()-1)
<< " RUSetStartTable[] = {\n ";
for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
UnitIdx < UnitEnd; ++UnitIdx) {
OS << PSetsSeqs.get(PSets[RegBank.getRegUnit(UnitIdx).RegClassUnitSetsIdx])
<< ",";
}
OS << "};\n"
<< " return &RCSetsTable[RUSetStartTable[RegUnit]];\n"
<< "}\n\n";
}
void RegisterInfoEmitter::EmitRegMappingTables(
raw_ostream &OS, const std::deque<CodeGenRegister> &Regs, bool isCtor) {
// Collect all information about dwarf register numbers
typedef std::map<Record*, std::vector<int64_t>, LessRecordRegister> DwarfRegNumsMapTy;
DwarfRegNumsMapTy DwarfRegNums;
// First, just pull all provided information to the map
unsigned maxLength = 0;
for (auto &RE : Regs) {
Record *Reg = RE.TheDef;
std::vector<int64_t> RegNums = Reg->getValueAsListOfInts("DwarfNumbers");
maxLength = std::max((size_t)maxLength, RegNums.size());
if (DwarfRegNums.count(Reg))
PrintWarning(Reg->getLoc(), Twine("DWARF numbers for register ") +
getQualifiedName(Reg) + "specified multiple times");
DwarfRegNums[Reg] = RegNums;
}
if (!maxLength)
return;
  // Now we know the maximal length of the number list. Append -1s where needed.
for (DwarfRegNumsMapTy::iterator
I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I)
for (unsigned i = I->second.size(), e = maxLength; i != e; ++i)
I->second.push_back(-1);
std::string Namespace = Regs.front().TheDef->getValueAsString("Namespace");
OS << "// " << Namespace << " Dwarf<->LLVM register mappings.\n";
// Emit reverse information about the dwarf register numbers.
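  // Two flavours are emitted: j == 0 for the plain DWARF flavour and j == 1
  // for the exception-handling (EH) flavour.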
for (unsigned j = 0; j < 2; ++j) {
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
OS << i << "Dwarf2L[]";
if (!isCtor) {
OS << " = {\n";
// Store the mapping sorted by the DWARF reg num so lookup can be done
// with a binary search.
std::map<uint64_t, Record*> Dwarf2LMap;
for (DwarfRegNumsMapTy::iterator
I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
int DwarfRegNo = I->second[i];
if (DwarfRegNo < 0)
continue;
Dwarf2LMap[DwarfRegNo] = I->first;
}
for (std::map<uint64_t, Record*>::iterator
I = Dwarf2LMap.begin(), E = Dwarf2LMap.end(); I != E; ++I)
OS << " { " << I->first << "U, " << getQualifiedName(I->second)
<< " },\n";
OS << "};\n";
} else {
OS << ";\n";
}
// We have to store the size in a const global because it's used in
// multiple places.
OS << "extern const unsigned " << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "Dwarf2LSize";
if (!isCtor)
OS << " = array_lengthof(" << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
<< "Dwarf2L);\n\n";
else
OS << ";\n\n";
}
}
for (auto &RE : Regs) {
Record *Reg = RE.TheDef;
const RecordVal *V = Reg->getValue("DwarfAlias");
if (!V || !V->getValue())
continue;
DefInit *DI = cast<DefInit>(V->getValue());
Record *Alias = DI->getDef();
DwarfRegNums[Reg] = DwarfRegNums[Alias];
}
// Emit information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
OS << i << "L2Dwarf[]";
if (!isCtor) {
OS << " = {\n";
// Store the mapping sorted by the LLVM reg num so lookup can be done
// with a binary search.
for (DwarfRegNumsMapTy::iterator
I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
int RegNo = I->second[i];
if (RegNo == -1) // -1 is the default value, don't emit a mapping.
continue;
OS << " { " << getQualifiedName(I->first) << ", " << RegNo
<< "U },\n";
}
OS << "};\n";
} else {
OS << ";\n";
}
// We have to store the size in a const global because it's used in
// multiple places.
OS << "extern const unsigned " << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2DwarfSize";
if (!isCtor)
OS << " = array_lengthof(" << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2Dwarf);\n\n";
else
OS << ";\n\n";
}
}
}
void RegisterInfoEmitter::EmitRegMapping(
raw_ostream &OS, const std::deque<CodeGenRegister> &Regs, bool isCtor) {
// Emit the initializer so the tables from EmitRegMappingTables get wired up
// to the MCRegisterInfo object.
unsigned maxLength = 0;
for (auto &RE : Regs) {
Record *Reg = RE.TheDef;
maxLength = std::max((size_t)maxLength,
Reg->getValueAsListOfInts("DwarfNumbers").size());
}
if (!maxLength)
return;
std::string Namespace = Regs.front().TheDef->getValueAsString("Namespace");
// Emit reverse information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
OS << " switch (";
if (j == 0)
OS << "DwarfFlavour";
else
OS << "EHFlavour";
OS << ") {\n"
<< " default:\n"
<< " llvm_unreachable(\"Unknown DWARF flavour\");\n";
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << " case " << i << ":\n";
OS << " ";
if (!isCtor)
OS << "RI->";
std::string Tmp;
raw_string_ostream(Tmp) << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
<< "Dwarf2L";
OS << "mapDwarfRegsToLLVMRegs(" << Tmp << ", " << Tmp << "Size, ";
if (j == 0)
OS << "false";
else
OS << "true";
OS << ");\n";
OS << " break;\n";
}
OS << " }\n";
}
// Emit information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
OS << " switch (";
if (j == 0)
OS << "DwarfFlavour";
else
OS << "EHFlavour";
OS << ") {\n"
<< " default:\n"
<< " llvm_unreachable(\"Unknown DWARF flavour\");\n";
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << " case " << i << ":\n";
OS << " ";
if (!isCtor)
OS << "RI->";
std::string Tmp;
raw_string_ostream(Tmp) << Namespace
<< (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
<< "L2Dwarf";
OS << "mapLLVMRegsToDwarfRegs(" << Tmp << ", " << Tmp << "Size, ";
if (j == 0)
OS << "false";
else
OS << "true";
OS << ");\n";
OS << " break;\n";
}
OS << " }\n";
}
}
// Print a BitVector as a sequence of hex numbers using a little-endian mapping.
// Width is the number of bits per hex number.
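// For example, with Width = 8 a vector with bits 0, 3, and 9 set prints as
// "0x09, 0x02, ": the first byte packs bits 0-7 (0x01 | 0x08) and the second
// packs the remaining bits, with bit 9 landing in position 1.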
static void printBitVectorAsHex(raw_ostream &OS,
const BitVector &Bits,
unsigned Width) {
assert(Width <= 32 && "Width too large");
unsigned Digits = (Width + 3) / 4;
for (unsigned i = 0, e = Bits.size(); i < e; i += Width) {
unsigned Value = 0;
for (unsigned j = 0; j != Width && i + j != e; ++j)
Value |= Bits.test(i + j) << j;
OS << format("0x%0*x, ", Digits, Value);
}
}
// Helper to emit a set of bits into a constant byte array.
class BitVectorEmitter {
BitVector Values;
public:
void add(unsigned v) {
if (v >= Values.size())
Values.resize(((v/8)+1)*8); // Round up to the next byte.
Values[v] = true;
}
void print(raw_ostream &OS) {
printBitVectorAsHex(OS, Values, 8);
}
};
static void printSimpleValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
OS << getEnumName(VT);
}
static void printSubRegIndex(raw_ostream &OS, const CodeGenSubRegIndex *Idx) {
OS << Idx->EnumValue;
}
// Differentially encoded register and regunit lists allow for better
// compression on regular register banks. The sequence is computed from the
// differential list as:
//
// out[0] = InitVal;
// out[n+1] = out[n] + diff[n]; // n = 0, 1, ...
//
// The initial value depends on the specific list. The list is terminated by a
// 0 differential which means we can't encode repeated elements.
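// For example, a register numbered 4 whose sub-register list is (6, 9) is
// encoded with InitVal = 4 as the diffs (2, 3) plus the 0 terminator;
// decoding walks 4, 4 + 2 = 6, 6 + 3 = 9 and stops at the 0.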
typedef SmallVector<uint16_t, 4> DiffVec;
typedef SmallVector<unsigned, 4> MaskVec;
// Differentially encode a sequence of numbers into V. The starting value and
// terminating 0 are not added to V, so it will have the same size as List.
static
DiffVec &diffEncode(DiffVec &V, unsigned InitVal, SparseBitVector<> List) {
assert(V.empty() && "Clear DiffVec before diffEncode.");
uint16_t Val = uint16_t(InitVal);
for (uint16_t Cur : List) {
V.push_back(Cur - Val);
Val = Cur;
}
return V;
}
template<typename Iter>
static
DiffVec &diffEncode(DiffVec &V, unsigned InitVal, Iter Begin, Iter End) {
assert(V.empty() && "Clear DiffVec before diffEncode.");
uint16_t Val = uint16_t(InitVal);
for (Iter I = Begin; I != End; ++I) {
uint16_t Cur = (*I)->EnumValue;
V.push_back(Cur - Val);
Val = Cur;
}
return V;
}
static void printDiff16(raw_ostream &OS, uint16_t Val) {
OS << Val;
}
static void printMask(raw_ostream &OS, unsigned Val) {
OS << format("0x%08X", Val);
}
// Try to combine Idx's compose map into Vec if it is compatible.
// Return false if it's not possible.
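// "Compatible" means every composition defined by Idx either matches the
// entry already recorded in Vec or fills a slot that is still null.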
static bool combine(const CodeGenSubRegIndex *Idx,
SmallVectorImpl<CodeGenSubRegIndex*> &Vec) {
const CodeGenSubRegIndex::CompMap &Map = Idx->getComposites();
for (const auto &I : Map) {
CodeGenSubRegIndex *&Entry = Vec[I.first->EnumValue - 1];
if (Entry && Entry != I.second)
return false;
}
// All entries are compatible. Make it so.
for (const auto &I : Map) {
auto *&Entry = Vec[I.first->EnumValue - 1];
assert((!Entry || Entry == I.second) &&
"Expected EnumValue to be unique");
Entry = I.second;
}
return true;
}
void
RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS,
CodeGenRegBank &RegBank,
const std::string &ClName) {
const auto &SubRegIndices = RegBank.getSubRegIndices();
OS << "unsigned " << ClName
<< "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n";
// Many sub-register indexes are composition-compatible, meaning that
//
// compose(IdxA, IdxB) == compose(IdxA', IdxB)
//
// for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed.
// The illegal entries can be used as wildcards to compress the table further.
// Map each Sub-register index to a compatible table row.
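// For example, two indices whose compositions agree wherever both are defined
// can share a single row; RowMap points both at it, and the entries that are
// undefined for either index simply remain 0 in that row.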
SmallVector<unsigned, 4> RowMap;
SmallVector<SmallVector<CodeGenSubRegIndex*, 4>, 4> Rows;
auto SubRegIndicesSize =
std::distance(SubRegIndices.begin(), SubRegIndices.end());
for (const auto &Idx : SubRegIndices) {
unsigned Found = ~0u;
for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
if (combine(&Idx, Rows[r])) {
Found = r;
break;
}
}
if (Found == ~0u) {
Found = Rows.size();
Rows.resize(Found + 1);
Rows.back().resize(SubRegIndicesSize);
combine(&Idx, Rows.back());
}
RowMap.push_back(Found);
}
// Output the row map if there are multiple rows.
if (Rows.size() > 1) {
OS << " static const " << getMinimalTypeForRange(Rows.size()) << " RowMap["
<< SubRegIndicesSize << "] = {\n ";
for (unsigned i = 0, e = SubRegIndicesSize; i != e; ++i)
OS << RowMap[i] << ", ";
OS << "\n };\n";
}
// Output the rows.
OS << " static const " << getMinimalTypeForRange(SubRegIndicesSize + 1)
<< " Rows[" << Rows.size() << "][" << SubRegIndicesSize << "] = {\n";
for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
OS << " { ";
for (unsigned i = 0, e = SubRegIndicesSize; i != e; ++i)
if (Rows[r][i])
OS << Rows[r][i]->EnumValue << ", ";
else
OS << "0, ";
OS << "},\n";
}
OS << " };\n\n";
OS << " --IdxA; assert(IdxA < " << SubRegIndicesSize << ");\n"
<< " --IdxB; assert(IdxB < " << SubRegIndicesSize << ");\n";
if (Rows.size() > 1)
OS << " return Rows[RowMap[IdxA]][IdxB];\n";
else
OS << " return Rows[0][IdxB];\n";
OS << "}\n\n";
}
void
RegisterInfoEmitter::emitComposeSubRegIndexLaneMask(raw_ostream &OS,
CodeGenRegBank &RegBank,
const std::string &ClName) {
// See the comments in computeSubRegLaneMasks() for our goal here.
const auto &SubRegIndices = RegBank.getSubRegIndices();
// Create a list of Mask+Rotate operations, with equivalent entries merged.
SmallVector<unsigned, 4> SubReg2SequenceIndexMap;
SmallVector<SmallVector<MaskRolPair, 1>, 4> Sequences;
for (const auto &Idx : SubRegIndices) {
const SmallVector<MaskRolPair, 1> &IdxSequence
= Idx.CompositionLaneMaskTransform;
unsigned Found = ~0u;
unsigned SIdx = 0;
unsigned NextSIdx;
for (size_t s = 0, se = Sequences.size(); s != se; ++s, SIdx = NextSIdx) {
SmallVectorImpl<MaskRolPair> &Sequence = Sequences[s];
NextSIdx = SIdx + Sequence.size() + 1;
if (Sequence == IdxSequence) {
Found = SIdx;
break;
}
}
if (Found == ~0u) {
Sequences.push_back(IdxSequence);
Found = SIdx;
}
SubReg2SequenceIndexMap.push_back(Found);
}
OS << "unsigned " << ClName
<< "::composeSubRegIndexLaneMaskImpl(unsigned IdxA, unsigned LaneMask)"
" const {\n";
OS << " struct MaskRolOp {\n"
" unsigned Mask;\n"
" uint8_t RotateLeft;\n"
" };\n"
" static const MaskRolOp Seqs[] = {\n";
unsigned Idx = 0;
for (size_t s = 0, se = Sequences.size(); s != se; ++s) {
OS << " ";
const SmallVectorImpl<MaskRolPair> &Sequence = Sequences[s];
for (size_t p = 0, pe = Sequence.size(); p != pe; ++p) {
const MaskRolPair &P = Sequence[p];
OS << format("{ 0x%08X, %2u }, ", P.Mask, P.RotateLeft);
}
OS << "{ 0, 0 }";
if (s+1 != se)
OS << ", ";
OS << " // Sequence " << Idx << "\n";
Idx += Sequence.size() + 1;
}
OS << " };\n"
" static const MaskRolOp *const CompositeSequences[] = {\n";
for (size_t i = 0, e = SubRegIndices.size(); i != e; ++i) {
OS << " ";
unsigned Idx = SubReg2SequenceIndexMap[i];
OS << format("&Seqs[%u]", Idx);
if (i+1 != e)
OS << ",";
OS << " // to " << SubRegIndices[i].getName() << "\n";
}
OS << " };\n\n";
OS << " --IdxA; assert(IdxA < " << SubRegIndices.size()
<< " && \"Subregister index out of bounds\");\n"
" unsigned Result = 0;\n"
" for (const MaskRolOp *Ops = CompositeSequences[IdxA]; Ops->Mask != 0; ++Ops)"
" {\n"
" unsigned Masked = LaneMask & Ops->Mask;\n"
" Result |= (Masked << Ops->RotateLeft) & 0xFFFFFFFF;\n"
" Result |= (Masked >> ((32 - Ops->RotateLeft) & 0x1F));\n"
" }\n"
" return Result;\n"
"}\n";
}
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank) {
emitSourceFileHeader("MC Register Information", OS);
OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
OS << "#undef GET_REGINFO_MC_DESC\n";
const auto &Regs = RegBank.getRegisters();
auto &SubRegIndices = RegBank.getSubRegIndices();
// The lists of sub-registers and super-registers go in the same array. That
// allows us to share suffixes.
typedef std::vector<const CodeGenRegister*> RegVec;
// Differentially encoded lists.
SequenceToOffsetTable<DiffVec> DiffSeqs;
SmallVector<DiffVec, 4> SubRegLists(Regs.size());
SmallVector<DiffVec, 4> SuperRegLists(Regs.size());
SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());
// List of lane masks accompanying register unit sequences.
SequenceToOffsetTable<MaskVec> LaneMaskSeqs;
SmallVector<MaskVec, 4> RegUnitLaneMasks(Regs.size());
// Keep track of sub-register names as well. These are not differentially
// encoded.
typedef SmallVector<const CodeGenSubRegIndex*, 4> SubRegIdxVec;
SequenceToOffsetTable<SubRegIdxVec, deref<llvm::less>> SubRegIdxSeqs;
SmallVector<SubRegIdxVec, 4> SubRegIdxLists(Regs.size());
SequenceToOffsetTable<std::string> RegStrings;
// Precompute register lists for the SequenceToOffsetTable.
unsigned i = 0;
for (auto I = Regs.begin(), E = Regs.end(); I != E; ++I, ++i) {
const auto &Reg = *I;
RegStrings.add(Reg.getName());
// Compute the ordered sub-register list.
SetVector<const CodeGenRegister*> SR;
Reg.addSubRegsPreOrder(SR, RegBank);
diffEncode(SubRegLists[i], Reg.EnumValue, SR.begin(), SR.end());
DiffSeqs.add(SubRegLists[i]);
// Compute the corresponding sub-register indexes.
SubRegIdxVec &SRIs = SubRegIdxLists[i];
for (unsigned j = 0, je = SR.size(); j != je; ++j)
SRIs.push_back(Reg.getSubRegIndex(SR[j]));
SubRegIdxSeqs.add(SRIs);
// Super-registers are already computed.
const RegVec &SuperRegList = Reg.getSuperRegs();
diffEncode(SuperRegLists[i], Reg.EnumValue, SuperRegList.begin(),
SuperRegList.end());
DiffSeqs.add(SuperRegLists[i]);
// Differentially encode the register unit list, seeded by register number.
// First compute a scale factor that allows more diff-lists to be reused:
//
// D0 -> (S0, S1)
// D1 -> (S2, S3)
//
// A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
// value for the differential decoder is the register number multiplied by
// the scale.
//
// Check the neighboring registers for arithmetic progressions.
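// For example, if D0 and D1 have register numbers 1 and 2 and regunits
// (0, 1) and (2, 3), the scale is 2 and both registers encode the same
// diff-list relative to 2 * RegNo, so a single table entry serves both.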
unsigned ScaleA = ~0u, ScaleB = ~0u;
SparseBitVector<> RUs = Reg.getNativeRegUnits();
if (I != Regs.begin() &&
std::prev(I)->getNativeRegUnits().count() == RUs.count())
ScaleB = *RUs.begin() - *std::prev(I)->getNativeRegUnits().begin();
if (std::next(I) != Regs.end() &&
std::next(I)->getNativeRegUnits().count() == RUs.count())
ScaleA = *std::next(I)->getNativeRegUnits().begin() - *RUs.begin();
unsigned Scale = std::min(ScaleB, ScaleA);
// Default the scale to 0 if it can't be encoded in 4 bits.
if (Scale >= 16)
Scale = 0;
RegUnitInitScale[i] = Scale;
DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg.EnumValue, RUs));
const auto &RUMasks = Reg.getRegUnitLaneMasks();
MaskVec &LaneMaskVec = RegUnitLaneMasks[i];
assert(LaneMaskVec.empty());
LaneMaskVec.insert(LaneMaskVec.begin(), RUMasks.begin(), RUMasks.end());
// Terminator mask should not be used inside of the list.
#ifndef NDEBUG
for (unsigned M : LaneMaskVec) {
assert(M != ~0u && "terminator mask should not be part of the list");
}
#endif
LaneMaskSeqs.add(LaneMaskVec);
}
// Compute the final layout of the sequence table.
DiffSeqs.layout();
LaneMaskSeqs.layout();
SubRegIdxSeqs.layout();
OS << "namespace llvm {\n\n";
const std::string &TargetName = Target.getName();
// Emit the shared table of differential lists.
OS << "extern const MCPhysReg " << TargetName << "RegDiffLists[] = {\n";
DiffSeqs.emit(OS, printDiff16);
OS << "};\n\n";
// Emit the shared table of regunit lane mask sequences.
OS << "extern const unsigned " << TargetName << "LaneMaskLists[] = {\n";
LaneMaskSeqs.emit(OS, printMask, "~0u");
OS << "};\n\n";
// Emit the table of sub-register indexes.
OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[] = {\n";
SubRegIdxSeqs.emit(OS, printSubRegIndex);
OS << "};\n\n";
// Emit the table of sub-register index sizes.
OS << "extern const MCRegisterInfo::SubRegCoveredBits "
<< TargetName << "SubRegIdxRanges[] = {\n";
OS << " { " << (uint16_t)-1 << ", " << (uint16_t)-1 << " },\n";
for (const auto &Idx : SubRegIndices) {
OS << " { " << Idx.Offset << ", " << Idx.Size << " },\t// "
<< Idx.getName() << "\n";
}
OS << "};\n\n";
// Emit the string table.
RegStrings.layout();
OS << "extern const char " << TargetName << "RegStrings[] = {\n";
RegStrings.emit(OS, printChar);
OS << "};\n\n";
OS << "extern const MCRegisterDesc " << TargetName
<< "RegDesc[] = { // Descriptors\n";
OS << " { " << RegStrings.get("") << ", 0, 0, 0, 0, 0 },\n";
// Emit the register descriptors now.
i = 0;
for (const auto &Reg : Regs) {
OS << " { " << RegStrings.get(Reg.getName()) << ", "
<< DiffSeqs.get(SubRegLists[i]) << ", " << DiffSeqs.get(SuperRegLists[i])
<< ", " << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
<< (DiffSeqs.get(RegUnitLists[i]) * 16 + RegUnitInitScale[i]) << ", "
<< LaneMaskSeqs.get(RegUnitLaneMasks[i]) << " },\n";
++i;
}
OS << "};\n\n"; // End of register descriptors...
// Emit the table of register unit roots. Each regunit has one or two root
// registers.
OS << "extern const MCPhysReg " << TargetName << "RegUnitRoots[][2] = {\n";
for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
assert(!Roots.empty() && "All regunits must have a root register.");
assert(Roots.size() <= 2 && "More than two roots not supported yet.");
OS << " { " << getQualifiedName(Roots.front()->TheDef);
for (unsigned r = 1; r != Roots.size(); ++r)
OS << ", " << getQualifiedName(Roots[r]->TheDef);
OS << " },\n";
}
OS << "};\n\n";
const auto &RegisterClasses = RegBank.getRegClasses();
// Loop over all of the register classes... emitting each one.
OS << "namespace { // Register classes...\n";
SequenceToOffsetTable<std::string> RegClassStrings;
// Emit the register enum value arrays for each RegisterClass
for (const auto &RC : RegisterClasses) {
ArrayRef<Record*> Order = RC.getOrder();
// Give the register class a legal C name if it's anonymous.
std::string Name = RC.getName();
RegClassStrings.add(Name);
// Emit the register list now.
OS << " // " << Name << " Register Class...\n"
<< " const MCPhysReg " << Name
<< "[] = {\n ";
for (unsigned i = 0, e = Order.size(); i != e; ++i) {
Record *Reg = Order[i];
OS << getQualifiedName(Reg) << ", ";
}
OS << "\n };\n\n";
OS << " // " << Name << " Bit set.\n"
<< " const uint8_t " << Name
<< "Bits[] = {\n ";
BitVectorEmitter BVE;
for (unsigned i = 0, e = Order.size(); i != e; ++i) {
Record *Reg = Order[i];
BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
}
BVE.print(OS);
OS << "\n };\n\n";
}
OS << "}\n\n";
RegClassStrings.layout();
OS << "extern const char " << TargetName << "RegClassStrings[] = {\n";
RegClassStrings.emit(OS, printChar);
OS << "};\n\n";
OS << "extern const MCRegisterClass " << TargetName
<< "MCRegisterClasses[] = {\n";
for (const auto &RC : RegisterClasses) {
// Asserts to make sure values will fit in table assuming types from
// MCRegisterInfo.h
assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");
OS << " { " << RC.getName() << ", " << RC.getName() << "Bits, "
<< RegClassStrings.get(RC.getName()) << ", "
<< RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
<< RC.getQualifiedName() + "RegClassID" << ", "
<< RC.SpillSize/8 << ", "
<< RC.SpillAlignment/8 << ", "
<< RC.CopyCost << ", "
<< RC.Allocatable << " },\n";
}
OS << "};\n\n";
EmitRegMappingTables(OS, Regs, false);
// Emit Reg encoding table
OS << "extern const uint16_t " << TargetName;
OS << "RegEncodingTable[] = {\n";
// Add entry for NoRegister
OS << " 0,\n";
for (const auto &RE : Regs) {
Record *Reg = RE.TheDef;
BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
uint64_t Value = 0;
for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
Value |= (uint64_t)B->getValue() << b;
}
OS << " " << Value << ",\n";
}
OS << "};\n"; // End of HW encoding table
// MCRegisterInfo initialization routine.
OS << "static inline void Init" << TargetName
<< "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
<< "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0, unsigned PC = 0) "
"{\n"
<< " RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
<< Regs.size() + 1 << ", RA, PC, " << TargetName << "MCRegisterClasses, "
<< RegisterClasses.size() << ", " << TargetName << "RegUnitRoots, "
<< RegBank.getNumNativeRegUnits() << ", " << TargetName << "RegDiffLists, "
<< TargetName << "LaneMaskLists, " << TargetName << "RegStrings, "
<< TargetName << "RegClassStrings, " << TargetName << "SubRegIdxLists, "
<< (std::distance(SubRegIndices.begin(), SubRegIndices.end()) + 1) << ",\n"
<< TargetName << "SubRegIdxRanges, " << TargetName
<< "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, false);
OS << "}\n\n";
OS << "} // End llvm namespace\n";
OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
void
RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank) {
emitSourceFileHeader("Register Information Header Fragment", OS);
OS << "\n#ifdef GET_REGINFO_HEADER\n";
OS << "#undef GET_REGINFO_HEADER\n";
const std::string &TargetName = Target.getName();
std::string ClassName = TargetName + "GenRegisterInfo";
OS << "#include \"llvm/Target/TargetRegisterInfo.h\"\n\n";
OS << "namespace llvm {\n\n";
OS << "class " << TargetName << "FrameLowering;\n\n";
OS << "struct " << ClassName << " : public TargetRegisterInfo {\n"
<< " explicit " << ClassName
<< "(unsigned RA, unsigned D = 0, unsigned E = 0, unsigned PC = 0);\n"
<< " bool needsStackRealignment(const MachineFunction &) const override\n"
<< " { return false; }\n";
if (!RegBank.getSubRegIndices().empty()) {
OS << " unsigned composeSubRegIndicesImpl"
<< "(unsigned, unsigned) const override;\n"
<< " unsigned composeSubRegIndexLaneMaskImpl"
<< "(unsigned, unsigned) const override;\n"
<< " const TargetRegisterClass *getSubClassWithSubReg"
<< "(const TargetRegisterClass*, unsigned) const override;\n";
}
OS << " const RegClassWeight &getRegClassWeight("
<< "const TargetRegisterClass *RC) const override;\n"
<< " unsigned getRegUnitWeight(unsigned RegUnit) const override;\n"
<< " unsigned getNumRegPressureSets() const override;\n"
<< " const char *getRegPressureSetName(unsigned Idx) const override;\n"
<< " unsigned getRegPressureSetLimit(const MachineFunction &MF, unsigned "
"Idx) const override;\n"
<< " const int *getRegClassPressureSets("
<< "const TargetRegisterClass *RC) const override;\n"
<< " const int *getRegUnitPressureSets("
<< "unsigned RegUnit) const override;\n"
<< " ArrayRef<const char *> getRegMaskNames() const override;\n"
<< " ArrayRef<const uint32_t *> getRegMasks() const override;\n"
<< " /// Devirtualized TargetFrameLowering.\n"
<< " static const " << TargetName << "FrameLowering *getFrameLowering(\n"
<< " const MachineFunction &MF);\n"
<< "};\n\n";
const auto &RegisterClasses = RegBank.getRegClasses();
if (!RegisterClasses.empty()) {
OS << "namespace " << RegisterClasses.front().Namespace
<< " { // Register classes\n";
for (const auto &RC : RegisterClasses) {
const std::string &Name = RC.getName();
// Output the extern for the instance.
OS << " extern const TargetRegisterClass " << Name << "RegClass;\n";
}
OS << "} // end of namespace " << TargetName << "\n\n";
}
OS << "} // End llvm namespace\n";
OS << "#endif // GET_REGINFO_HEADER\n\n";
}
//
// runTargetDesc - Output the target register and register file descriptions.
//
void
RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank){
emitSourceFileHeader("Target Register and Register Classes Information", OS);
OS << "\n#ifdef GET_REGINFO_TARGET_DESC\n";
OS << "#undef GET_REGINFO_TARGET_DESC\n";
OS << "namespace llvm {\n\n";
// Get access to MCRegisterClass data.
OS << "extern const MCRegisterClass " << Target.getName()
<< "MCRegisterClasses[];\n";
// Start out by emitting each of the register classes.
const auto &RegisterClasses = RegBank.getRegClasses();
const auto &SubRegIndices = RegBank.getSubRegIndices();
// Collect all registers belonging to any allocatable class.
std::set<Record*> AllocatableRegs;
// Collect allocatable registers.
for (const auto &RC : RegisterClasses) {
ArrayRef<Record*> Order = RC.getOrder();
if (RC.Allocatable)
AllocatableRegs.insert(Order.begin(), Order.end());
}
// Build a shared array of value types.
SequenceToOffsetTable<SmallVector<MVT::SimpleValueType, 4> > VTSeqs;
for (const auto &RC : RegisterClasses)
VTSeqs.add(RC.VTs);
VTSeqs.layout();
OS << "\nstatic const MVT::SimpleValueType VTLists[] = {\n";
VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
OS << "};\n";
// Emit SubRegIndex names, skipping 0.
OS << "\nstatic const char *const SubRegIndexNameTable[] = { \"";
for (const auto &Idx : SubRegIndices) {
OS << Idx.getName();
OS << "\", \"";
}
OS << "\" };\n\n";
// Emit SubRegIndex lane masks, including 0.
OS << "\nstatic const unsigned SubRegIndexLaneMaskTable[] = {\n ~0u,\n";
for (const auto &Idx : SubRegIndices) {
OS << format(" 0x%08x, // ", Idx.LaneMask) << Idx.getName() << '\n';
}
OS << " };\n\n";
OS << "\n";
// Now that all of the structs have been emitted, emit the instances.
if (!RegisterClasses.empty()) {
OS << "\nstatic const TargetRegisterClass *const "
<< "NullRegClasses[] = { nullptr };\n\n";
// Emit register class bit mask tables. The first bit mask emitted for a
// register class, RC, is the set of sub-classes, including RC itself.
//
// If RC has super-registers, also create a list of subreg indices and bit
// masks, (Idx, Mask). The bit mask has a bit for every superreg regclass,
// SuperRC, that satisfies:
//
// For all SuperReg in SuperRC: SuperReg:Idx in RC
//
// The 0-terminated list of subreg indices starts at:
//
// RC->getSuperRegIndices() = SuperRegIdxSeqs + ...
//
// The corresponding bitmasks follow the sub-class mask in memory. Each
// mask has RCMaskWords uint32_t entries.
//
// Every bit mask present in the list has at least one bit set.
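// For example, on X86 every GR64 register has a GR32 register as its
// sub_32bit sub-register, so GR32's list contains sub_32bit and the mask
// that follows it has the bit for the GR64 class set.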
// Compress the sub-reg index lists.
typedef std::vector<const CodeGenSubRegIndex*> IdxList;
SmallVector<IdxList, 8> SuperRegIdxLists(RegisterClasses.size());
SequenceToOffsetTable<IdxList, deref<llvm::less>> SuperRegIdxSeqs;
BitVector MaskBV(RegisterClasses.size());
for (const auto &RC : RegisterClasses) {
OS << "static const uint32_t " << RC.getName() << "SubClassMask[] = {\n ";
printBitVectorAsHex(OS, RC.getSubClasses(), 32);
// Emit super-reg class masks for any relevant SubRegIndices that can
// project into RC.
IdxList &SRIList = SuperRegIdxLists[RC.EnumValue];
for (auto &Idx : SubRegIndices) {
MaskBV.reset();
RC.getSuperRegClasses(&Idx, MaskBV);
if (MaskBV.none())
continue;
SRIList.push_back(&Idx);
OS << "\n ";
printBitVectorAsHex(OS, MaskBV, 32);
OS << "// " << Idx.getName();
}
SuperRegIdxSeqs.add(SRIList);
OS << "\n};\n\n";
}
OS << "static const uint16_t SuperRegIdxSeqs[] = {\n";
SuperRegIdxSeqs.layout();
SuperRegIdxSeqs.emit(OS, printSubRegIndex);
OS << "};\n\n";
// Emit NULL terminated super-class lists.
for (const auto &RC : RegisterClasses) {
ArrayRef<CodeGenRegisterClass*> Supers = RC.getSuperClasses();
// Skip classes without supers. We can reuse NullRegClasses.
if (Supers.empty())
continue;
OS << "static const TargetRegisterClass *const "
<< RC.getName() << "Superclasses[] = {\n";
for (const auto *Super : Supers)
OS << " &" << Super->getQualifiedName() << "RegClass,\n";
OS << " nullptr\n};\n\n";
}
// Emit methods.
for (const auto &RC : RegisterClasses) {
if (!RC.AltOrderSelect.empty()) {
OS << "\nstatic inline unsigned " << RC.getName()
<< "AltOrderSelect(const MachineFunction &MF) {"
<< RC.AltOrderSelect << "}\n\n"
<< "static ArrayRef<MCPhysReg> " << RC.getName()
<< "GetRawAllocationOrder(const MachineFunction &MF) {\n";
for (unsigned oi = 1 , oe = RC.getNumOrders(); oi != oe; ++oi) {
ArrayRef<Record*> Elems = RC.getOrder(oi);
if (!Elems.empty()) {
OS << " static const MCPhysReg AltOrder" << oi << "[] = {";
for (unsigned elem = 0; elem != Elems.size(); ++elem)
OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
OS << " };\n";
}
}
OS << " const MCRegisterClass &MCR = " << Target.getName()
<< "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];\n"
<< " const ArrayRef<MCPhysReg> Order[] = {\n"
<< " makeArrayRef(MCR.begin(), MCR.getNumRegs()";
for (unsigned oi = 1, oe = RC.getNumOrders(); oi != oe; ++oi)
if (RC.getOrder(oi).empty())
OS << "),\n ArrayRef<MCPhysReg>(";
else
OS << "),\n makeArrayRef(AltOrder" << oi;
OS << ")\n };\n const unsigned Select = " << RC.getName()
<< "AltOrderSelect(MF);\n assert(Select < " << RC.getNumOrders()
<< ");\n return Order[Select];\n}\n";
}
}
// Now emit the actual value-initialized register class instances.
OS << "\nnamespace " << RegisterClasses.front().Namespace
<< " { // Register class instances\n";
for (const auto &RC : RegisterClasses) {
OS << " extern const TargetRegisterClass " << RC.getName()
<< "RegClass = {\n " << '&' << Target.getName()
<< "MCRegisterClasses[" << RC.getName() << "RegClassID],\n "
<< "VTLists + " << VTSeqs.get(RC.VTs) << ",\n " << RC.getName()
<< "SubClassMask,\n SuperRegIdxSeqs + "
<< SuperRegIdxSeqs.get(SuperRegIdxLists[RC.EnumValue]) << ",\n "
<< format("0x%08x,\n ", RC.LaneMask)
<< (unsigned)RC.AllocationPriority << ",\n "
<< (RC.HasDisjunctSubRegs?"true":"false")
<< ", /* HasDisjunctSubRegs */\n ";
if (RC.getSuperClasses().empty())
OS << "NullRegClasses,\n ";
else
OS << RC.getName() << "Superclasses,\n ";
if (RC.AltOrderSelect.empty())
OS << "nullptr\n";
else
OS << RC.getName() << "GetRawAllocationOrder\n";
OS << " };\n\n";
}
OS << "}\n";
}
OS << "\nnamespace {\n";
OS << " const TargetRegisterClass* const RegisterClasses[] = {\n";
for (const auto &RC : RegisterClasses)
OS << " &" << RC.getQualifiedName() << "RegClass,\n";
OS << " };\n";
OS << "}\n"; // End of anonymous namespace...
// Emit extra information about registers.
const std::string &TargetName = Target.getName();
OS << "\nstatic const TargetRegisterInfoDesc "
<< TargetName << "RegInfoDesc[] = { // Extra Descriptors\n";
OS << " { 0, 0 },\n";
const auto &Regs = RegBank.getRegisters();
for (const auto &Reg : Regs) {
OS << " { ";
OS << Reg.CostPerUse << ", "
<< int(AllocatableRegs.count(Reg.TheDef)) << " },\n";
}
OS << "};\n"; // End of register descriptors...
std::string ClassName = Target.getName() + "GenRegisterInfo";
auto SubRegIndicesSize =
std::distance(SubRegIndices.begin(), SubRegIndices.end());
if (!SubRegIndices.empty()) {
emitComposeSubRegIndices(OS, RegBank, ClassName);
emitComposeSubRegIndexLaneMask(OS, RegBank, ClassName);
}
// Emit getSubClassWithSubReg.
if (!SubRegIndices.empty()) {
OS << "const TargetRegisterClass *" << ClassName
<< "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
<< " const {\n";
// Use the smallest type that can hold a regclass ID with room for a
// sentinel.
if (RegisterClasses.size() < UINT8_MAX)
OS << " static const uint8_t Table[";
else if (RegisterClasses.size() < UINT16_MAX)
OS << " static const uint16_t Table[";
else
PrintFatalError("Too many register classes.");
OS << RegisterClasses.size() << "][" << SubRegIndicesSize << "] = {\n";
for (const auto &RC : RegisterClasses) {
OS << " {\t// " << RC.getName() << "\n";
for (auto &Idx : SubRegIndices) {
if (CodeGenRegisterClass *SRC = RC.getSubClassWithSubReg(&Idx))
OS << " " << SRC->EnumValue + 1 << ",\t// " << Idx.getName()
<< " -> " << SRC->getName() << "\n";
else
OS << " 0,\t// " << Idx.getName() << "\n";
}
OS << " },\n";
}
OS << " };\n assert(RC && \"Missing regclass\");\n"
<< " if (!Idx) return RC;\n --Idx;\n"
<< " assert(Idx < " << SubRegIndicesSize << " && \"Bad subreg\");\n"
<< " unsigned TV = Table[RC->getID()][Idx];\n"
<< " return TV ? getRegClass(TV - 1) : nullptr;\n}\n\n";
}
EmitRegUnitPressure(OS, RegBank, ClassName);
// Emit the constructor of the class...
OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
OS << "extern const MCPhysReg " << TargetName << "RegDiffLists[];\n";
OS << "extern const unsigned " << TargetName << "LaneMaskLists[];\n";
OS << "extern const char " << TargetName << "RegStrings[];\n";
OS << "extern const char " << TargetName << "RegClassStrings[];\n";
OS << "extern const MCPhysReg " << TargetName << "RegUnitRoots[][2];\n";
OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[];\n";
OS << "extern const MCRegisterInfo::SubRegCoveredBits "
<< TargetName << "SubRegIdxRanges[];\n";
OS << "extern const uint16_t " << TargetName << "RegEncodingTable[];\n";
EmitRegMappingTables(OS, Regs, true);
OS << ClassName << "::\n" << ClassName
<< "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour, unsigned PC)\n"
<< " : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
<< ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
<< " SubRegIndexNameTable, SubRegIndexLaneMaskTable, 0x";
OS.write_hex(RegBank.CoveringLanes);
OS << ") {\n"
<< " InitMCRegisterInfo(" << TargetName << "RegDesc, " << Regs.size() + 1
<< ", RA, PC,\n " << TargetName
<< "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
<< " " << TargetName << "RegUnitRoots,\n"
<< " " << RegBank.getNumNativeRegUnits() << ",\n"
<< " " << TargetName << "RegDiffLists,\n"
<< " " << TargetName << "LaneMaskLists,\n"
<< " " << TargetName << "RegStrings,\n"
<< " " << TargetName << "RegClassStrings,\n"
<< " " << TargetName << "SubRegIdxLists,\n"
<< " " << SubRegIndicesSize + 1 << ",\n"
<< " " << TargetName << "SubRegIdxRanges,\n"
<< " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, true);
OS << "}\n\n";
// Emit CalleeSavedRegs information.
std::vector<Record*> CSRSets =
Records.getAllDerivedDefinitions("CalleeSavedRegs");
for (unsigned i = 0, e = CSRSets.size(); i != e; ++i) {
Record *CSRSet = CSRSets[i];
const SetTheory::RecVec *Regs = RegBank.getSets().expand(CSRSet);
assert(Regs && "Cannot expand CalleeSavedRegs instance");
// Emit the *_SaveList list of callee-saved registers.
OS << "static const MCPhysReg " << CSRSet->getName()
<< "_SaveList[] = { ";
for (unsigned r = 0, re = Regs->size(); r != re; ++r)
OS << getQualifiedName((*Regs)[r]) << ", ";
OS << "0 };\n";
// Emit the *_RegMask bit mask of call-preserved registers.
BitVector Covered = RegBank.computeCoveredRegisters(*Regs);
// Check for an optional OtherPreserved set.
// Add those registers to RegMask, but not to SaveList.
if (DagInit *OPDag =
dyn_cast<DagInit>(CSRSet->getValueInit("OtherPreserved"))) {
SetTheory::RecSet OPSet;
RegBank.getSets().evaluate(OPDag, OPSet, CSRSet->getLoc());
Covered |= RegBank.computeCoveredRegisters(
ArrayRef<Record*>(OPSet.begin(), OPSet.end()));
}
OS << "static const uint32_t " << CSRSet->getName()
<< "_RegMask[] = { ";
printBitVectorAsHex(OS, Covered, 32);
OS << "};\n";
}
OS << "\n\n";
OS << "ArrayRef<const uint32_t *> " << ClassName
<< "::getRegMasks() const {\n";
OS << " static const uint32_t *Masks[] = {\n";
for (Record *CSRSet : CSRSets)
OS << " " << CSRSet->getName() << "_RegMask, \n";
OS << " nullptr\n };\n";
OS << " return ArrayRef<const uint32_t *>(Masks, (size_t)" << CSRSets.size()
<< ");\n";
OS << "}\n\n";
OS << "ArrayRef<const char *> " << ClassName
<< "::getRegMaskNames() const {\n";
OS << " static const char *Names[] = {\n";
for (Record *CSRSet : CSRSets)
OS << " " << '"' << CSRSet->getName() << '"' << ",\n";
OS << " nullptr\n };\n";
OS << " return ArrayRef<const char *>(Names, (size_t)" << CSRSets.size()
<< ");\n";
OS << "}\n\n";
OS << "const " << TargetName << "FrameLowering *"
<< TargetName << "GenRegisterInfo::\n"
<< " getFrameLowering(const MachineFunction &MF) {\n"
<< " return static_cast<const " << TargetName << "FrameLowering *>(\n"
<< " MF.getSubtarget().getFrameLowering());\n"
<< "}\n\n";
OS << "} // End llvm namespace\n";
OS << "#endif // GET_REGINFO_TARGET_DESC\n\n";
}
void RegisterInfoEmitter::run(raw_ostream &OS) {
CodeGenTarget Target(Records);
CodeGenRegBank &RegBank = Target.getRegBank();
RegBank.computeDerivedInfo();
runEnums(OS, Target, RegBank);
runMCDesc(OS, Target, RegBank);
runTargetHeader(OS, Target, RegBank);
runTargetDesc(OS, Target, RegBank);
}
namespace llvm {
void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS) {
RegisterInfoEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/AsmWriterEmitter.cpp | //===- AsmWriterEmitter.cpp - Generate an assembly writer -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits an assembly printer for the current target.
// Note that this is currently fairly skeletal, but will grow over time.
//
//===----------------------------------------------------------------------===//
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <cassert>
#include <map>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "asm-writer-emitter"
namespace {
class AsmWriterEmitter {
RecordKeeper &Records;
CodeGenTarget Target;
std::map<const CodeGenInstruction*, AsmWriterInst*> CGIAWIMap;
const std::vector<const CodeGenInstruction*> *NumberedInstructions;
std::vector<AsmWriterInst> Instructions;
std::vector<std::string> PrintMethods;
public:
AsmWriterEmitter(RecordKeeper &R);
void run(raw_ostream &o);
private:
void EmitPrintInstruction(raw_ostream &o);
void EmitGetRegisterName(raw_ostream &o);
void EmitPrintAliasInstruction(raw_ostream &O);
AsmWriterInst *getAsmWriterInstByID(unsigned ID) const {
assert(ID < NumberedInstructions->size());
std::map<const CodeGenInstruction*, AsmWriterInst*>::const_iterator I =
CGIAWIMap.find(NumberedInstructions->at(ID));
assert(I != CGIAWIMap.end() && "Didn't find inst!");
return I->second;
}
void FindUniqueOperandCommands(std::vector<std::string> &UOC,
std::vector<unsigned> &InstIdxs,
std::vector<unsigned> &InstOpsUsed) const;
};
} // end anonymous namespace
static void PrintCases(std::vector<std::pair<std::string,
AsmWriterOperand> > &OpsToPrint, raw_ostream &O) {
O << " case " << OpsToPrint.back().first << ": ";
AsmWriterOperand TheOp = OpsToPrint.back().second;
OpsToPrint.pop_back();
// Check to see if any other operands are identical in this list, and if so,
// emit a case label for them.
for (unsigned i = OpsToPrint.size(); i != 0; --i)
if (OpsToPrint[i-1].second == TheOp) {
O << "\n case " << OpsToPrint[i-1].first << ": ";
OpsToPrint.erase(OpsToPrint.begin()+i-1);
}
// Finally, emit the code.
O << TheOp.getCode();
O << "break;\n";
}
/// EmitInstructions - Emit the last instruction in the vector and any other
/// instructions that are suitably similar to it.
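/// Two instructions are "suitably similar" when their printer operands differ
/// in at most one position; the shared operands are emitted once and the
/// differing operand is handled by a nested switch on the opcode.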
static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
raw_ostream &O) {
AsmWriterInst FirstInst = Insts.back();
Insts.pop_back();
std::vector<AsmWriterInst> SimilarInsts;
unsigned DifferingOperand = ~0;
for (unsigned i = Insts.size(); i != 0; --i) {
unsigned DiffOp = Insts[i-1].MatchesAllButOneOp(FirstInst);
if (DiffOp != ~1U) {
if (DifferingOperand == ~0U) // First match!
DifferingOperand = DiffOp;
// If this differs in the same operand as the rest of the instructions in
// this class, move it to the SimilarInsts list.
if (DifferingOperand == DiffOp || DiffOp == ~0U) {
SimilarInsts.push_back(Insts[i-1]);
Insts.erase(Insts.begin()+i-1);
}
}
}
O << " case " << FirstInst.CGI->Namespace << "::"
<< FirstInst.CGI->TheDef->getName() << ":\n";
for (unsigned i = 0, e = SimilarInsts.size(); i != e; ++i)
O << " case " << SimilarInsts[i].CGI->Namespace << "::"
<< SimilarInsts[i].CGI->TheDef->getName() << ":\n";
for (unsigned i = 0, e = FirstInst.Operands.size(); i != e; ++i) {
if (i != DifferingOperand) {
// If the operand is the same for all instructions, just print it.
O << " " << FirstInst.Operands[i].getCode();
} else {
// If this is the operand that varies between all of the instructions,
// emit a switch for just this operand now.
O << " switch (MI->getOpcode()) {\n";
std::vector<std::pair<std::string, AsmWriterOperand> > OpsToPrint;
OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace + "::" +
FirstInst.CGI->TheDef->getName(),
FirstInst.Operands[i]));
for (unsigned si = 0, e = SimilarInsts.size(); si != e; ++si) {
AsmWriterInst &AWI = SimilarInsts[si];
OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace+"::"+
AWI.CGI->TheDef->getName(),
AWI.Operands[i]));
}
std::reverse(OpsToPrint.begin(), OpsToPrint.end());
while (!OpsToPrint.empty())
PrintCases(OpsToPrint, O);
O << " }";
}
O << "\n";
}
O << " break;\n";
}
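/// FindUniqueOperandCommands - Collect the distinct printer snippets for the
/// first unhandled operand of every instruction into UniqueOperandCommands,
/// recording for each instruction which snippet it uses (InstIdxs) and how
/// many of its operands that snippet consumes (InstOpsUsed).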
void AsmWriterEmitter::
FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
std::vector<unsigned> &InstIdxs,
std::vector<unsigned> &InstOpsUsed) const {
InstIdxs.assign(NumberedInstructions->size(), ~0U);
// This vector parallels UniqueOperandCommands, keeping track of which
// instructions each case is used for. Each entry is a comma-separated
// string of enum names.
std::vector<std::string> InstrsForCase;
InstrsForCase.resize(UniqueOperandCommands.size());
InstOpsUsed.assign(UniqueOperandCommands.size(), 0);
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
const AsmWriterInst *Inst = getAsmWriterInstByID(i);
if (!Inst)
continue; // PHI, INLINEASM, CFI_INSTRUCTION, etc.
std::string Command;
if (Inst->Operands.empty())
continue; // Instruction already done.
Command = " " + Inst->Operands[0].getCode() + "\n";
// Check to see if we already have 'Command' in UniqueOperandCommands.
// If not, add it.
bool FoundIt = false;
for (unsigned idx = 0, e = UniqueOperandCommands.size(); idx != e; ++idx)
if (UniqueOperandCommands[idx] == Command) {
InstIdxs[i] = idx;
InstrsForCase[idx] += ", ";
InstrsForCase[idx] += Inst->CGI->TheDef->getName();
FoundIt = true;
break;
}
if (!FoundIt) {
InstIdxs[i] = UniqueOperandCommands.size();
UniqueOperandCommands.push_back(Command);
InstrsForCase.push_back(Inst->CGI->TheDef->getName());
// This command matches one operand so far.
InstOpsUsed.push_back(1);
}
}
// For each entry of UniqueOperandCommands, there is a set of instructions
// that uses it. If the next command is identical for all instructions in the
// set, fold it into the command.
for (unsigned CommandIdx = 0, e = UniqueOperandCommands.size();
CommandIdx != e; ++CommandIdx) {
for (unsigned Op = 1; ; ++Op) {
// Scan for the first instruction in the set.
std::vector<unsigned>::iterator NIT =
std::find(InstIdxs.begin(), InstIdxs.end(), CommandIdx);
if (NIT == InstIdxs.end()) break; // No commonality.
// If this instruction has no more operands, there isn't anything to merge
// into this command.
const AsmWriterInst *FirstInst =
getAsmWriterInstByID(NIT-InstIdxs.begin());
if (!FirstInst || FirstInst->Operands.size() == Op)
break;
// Otherwise, scan to see if all of the other instructions in this command
// set share the operand.
bool AllSame = true;
for (NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx);
NIT != InstIdxs.end();
NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx)) {
// Okay, found another instruction in this command set. If the operand
// matches, we're ok, otherwise bail out.
const AsmWriterInst *OtherInst =
getAsmWriterInstByID(NIT-InstIdxs.begin());
if (!OtherInst || OtherInst->Operands.size() == Op ||
OtherInst->Operands[Op] != FirstInst->Operands[Op]) {
AllSame = false;
break;
}
}
if (!AllSame) break;
// Okay, everything in this command set has the same next operand. Add it
// to UniqueOperandCommands and remember that it was consumed.
std::string Command = " " + FirstInst->Operands[Op].getCode() + "\n";
UniqueOperandCommands[CommandIdx] += Command;
InstOpsUsed[CommandIdx]++;
}
}
// Prepend some of the instructions each case is used for onto the case val.
for (unsigned i = 0, e = InstrsForCase.size(); i != e; ++i) {
std::string Instrs = InstrsForCase[i];
if (Instrs.size() > 70) {
Instrs.erase(Instrs.begin()+70, Instrs.end());
Instrs += "...";
}
if (!Instrs.empty())
UniqueOperandCommands[i] = " // " + Instrs + "\n" +
UniqueOperandCommands[i];
}
}
static void UnescapeString(std::string &Str) {
for (unsigned i = 0; i != Str.size(); ++i) {
if (Str[i] == '\\' && i != Str.size()-1) {
switch (Str[i+1]) {
default: continue; // Don't execute the code after the switch.
case 'a': Str[i] = '\a'; break;
case 'b': Str[i] = '\b'; break;
case 'e': Str[i] = 27; break;
case 'f': Str[i] = '\f'; break;
case 'n': Str[i] = '\n'; break;
case 'r': Str[i] = '\r'; break;
case 't': Str[i] = '\t'; break;
case 'v': Str[i] = '\v'; break;
case '"': Str[i] = '\"'; break;
case '\'': Str[i] = '\''; break;
case '\\': Str[i] = '\\'; break;
}
// Nuke the second character.
Str.erase(Str.begin()+i+1);
}
}
}
/// EmitPrintInstruction - Generate the code for the "printInstruction" method
/// implementation. Destroys all instances of AsmWriterInst information, by
/// clearing the Instructions vector.
void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
unsigned PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
O <<
"/// printInstruction - This method is automatically generated by tablegen\n"
"/// from the instruction set description.\n"
"void " << Target.getName() << ClassName
<< "::printInstruction(const MCInst *MI, "
<< (PassSubtarget ? "const MCSubtargetInfo &STI, " : "")
<< "raw_ostream &O) {\n";
// Build an aggregate string, and build a table of offsets into it.
SequenceToOffsetTable<std::string> StringTable;
/// OpcodeInfo - This encodes the index of the string to use for the first
/// chunk of the output as well as indices used for operand printing.
/// To reduce the number of unhandled cases, we expand the size from 32 bits
/// to 64 bits; a second table is emitted for any bits beyond the first 32.
std::vector<uint64_t> OpcodeInfo;
// Add all strings to the string table upfront so it can generate an optimized
// representation.
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions->at(i)];
if (AWI &&
AWI->Operands[0].OperandType ==
AsmWriterOperand::isLiteralTextOperand &&
!AWI->Operands[0].Str.empty()) {
std::string Str = AWI->Operands[0].Str;
UnescapeString(Str);
StringTable.add(Str);
}
}
StringTable.layout();
unsigned MaxStringIdx = 0;
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions->at(i)];
unsigned Idx;
if (!AWI) {
// Something not handled by the asmwriter printer.
Idx = ~0U;
} else if (AWI->Operands[0].OperandType !=
AsmWriterOperand::isLiteralTextOperand ||
AWI->Operands[0].Str.empty()) {
// Something handled by the asmwriter printer, but with no leading string.
Idx = StringTable.get("");
} else {
std::string Str = AWI->Operands[0].Str;
UnescapeString(Str);
Idx = StringTable.get(Str);
MaxStringIdx = std::max(MaxStringIdx, Idx);
// Nuke the string from the operand list. It is now handled!
AWI->Operands.erase(AWI->Operands.begin());
}
// Bias offset by one since we want 0 as a sentinel.
OpcodeInfo.push_back(Idx+1);
}
// Figure out how many bits we used for the string index.
unsigned AsmStrBits = Log2_32_Ceil(MaxStringIdx+2);
// To reduce code size, we compactify common instructions into a few bits
// in the opcode-indexed table.
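// Each pass of the loop below groups the instructions whose next operands
// print identically, assigns that shared command an index, and packs the
// index into the next free bits of OpcodeInfo until the bits run out.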
unsigned BitsLeft = 64-AsmStrBits;
std::vector<std::vector<std::string>> TableDrivenOperandPrinters;
while (1) {
std::vector<std::string> UniqueOperandCommands;
std::vector<unsigned> InstIdxs;
std::vector<unsigned> NumInstOpsHandled;
FindUniqueOperandCommands(UniqueOperandCommands, InstIdxs,
NumInstOpsHandled);
// If we ran out of operands to print, we're done.
if (UniqueOperandCommands.empty()) break;
// Compute the number of bits we need to represent these cases, this is
// ceil(log2(numentries)).
unsigned NumBits = Log2_32_Ceil(UniqueOperandCommands.size());
// If we don't have enough bits for this operand, don't include it.
if (NumBits > BitsLeft) {
DEBUG(errs() << "Not enough bits to densely encode " << NumBits
<< " more bits\n");
break;
}
// Otherwise, we can include this in the initial lookup table. Add it in.
for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i)
if (InstIdxs[i] != ~0U) {
OpcodeInfo[i] |= (uint64_t)InstIdxs[i] << (64-BitsLeft);
}
BitsLeft -= NumBits;
// Remove the info about this operand.
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
if (AsmWriterInst *Inst = getAsmWriterInstByID(i))
if (!Inst->Operands.empty()) {
unsigned NumOps = NumInstOpsHandled[InstIdxs[i]];
assert(NumOps <= Inst->Operands.size() &&
"Can't remove this many ops!");
Inst->Operands.erase(Inst->Operands.begin(),
Inst->Operands.begin()+NumOps);
}
}
// Remember the handlers for this set of operands.
TableDrivenOperandPrinters.push_back(std::move(UniqueOperandCommands));
}
// We always emit at least one 32-bit table. A second table is emitted if
// more bits are needed.
O<<" static const uint32_t OpInfo[] = {\n";
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
O << " " << (OpcodeInfo[i] & 0xffffffff) << "U,\t// "
<< NumberedInstructions->at(i)->TheDef->getName() << "\n";
}
// Add a dummy entry so the array init doesn't end with a comma.
O << " 0U\n";
O << " };\n\n";
if (BitsLeft < 32) {
// Add a second OpInfo table only when it is necessary.
// Adjust the type of the second table based on the number of bits needed.
O << " static const uint"
<< ((BitsLeft < 16) ? "32" : (BitsLeft < 24) ? "16" : "8")
<< "_t OpInfo2[] = {\n";
for (unsigned i = 0, e = NumberedInstructions->size(); i != e; ++i) {
O << " " << (OpcodeInfo[i] >> 32) << "U,\t// "
<< NumberedInstructions->at(i)->TheDef->getName() << "\n";
}
// Add a dummy entry so the array init doesn't end with a comma.
O << " 0U\n";
O << " };\n\n";
}
// Emit the string itself.
O << " static const char AsmStrs[] = {\n";
StringTable.emit(O, printChar);
O << " };\n\n";
O << " O << \"\\t\";\n\n";
O << " // Emit the opcode for the instruction.\n";
if (BitsLeft < 32) {
// If we have two tables then we need to perform two lookups and combine
// the results into a single 64-bit value.
O << " uint64_t Bits1 = OpInfo[MI->getOpcode()];\n"
<< " uint64_t Bits2 = OpInfo2[MI->getOpcode()];\n"
<< " uint64_t Bits = (Bits2 << 32) | Bits1;\n";
} else {
// If only one table is used we just need to perform a single lookup.
O << " uint32_t Bits = OpInfo[MI->getOpcode()];\n";
}
O << " assert(Bits != 0 && \"Cannot print this instruction.\");\n"
<< " O << AsmStrs+(Bits & " << (1 << AsmStrBits)-1 << ")-1;\n\n";
// Output the table driven operand information.
BitsLeft = 64-AsmStrBits;
for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) {
std::vector<std::string> &Commands = TableDrivenOperandPrinters[i];
// Compute the number of bits we need to represent these cases, this is
// ceil(log2(numentries)).
unsigned NumBits = Log2_32_Ceil(Commands.size());
assert(NumBits <= BitsLeft && "consistency error");
// Emit code to extract this field from Bits.
O << "\n // Fragment " << i << " encoded into " << NumBits
<< " bits for " << Commands.size() << " unique commands.\n";
if (Commands.size() == 2) {
// Emit two possibilities with if/else.
O << " if ((Bits >> "
<< (64-BitsLeft) << ") & "
<< ((1 << NumBits)-1) << ") {\n"
<< Commands[1]
<< " } else {\n"
<< Commands[0]
<< " }\n\n";
} else if (Commands.size() == 1) {
// Emit a single possibility.
O << Commands[0] << "\n\n";
} else {
O << " switch ((Bits >> "
<< (64-BitsLeft) << ") & "
<< ((1 << NumBits)-1) << ") {\n"
<< " default: llvm_unreachable(\"Invalid command number.\");\n";
// Print out all the cases.
for (unsigned i = 0, e = Commands.size(); i != e; ++i) {
O << " case " << i << ":\n";
O << Commands[i];
O << " break;\n";
}
O << " }\n\n";
}
BitsLeft -= NumBits;
}
// Okay, delete instructions with no operand info left.
for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
// Entire instruction has been emitted?
AsmWriterInst &Inst = Instructions[i];
if (Inst.Operands.empty()) {
Instructions.erase(Instructions.begin()+i);
--i; --e;
}
}
// Because this is a vector, we want to emit from the end. Reverse all of the
// elements in the vector.
std::reverse(Instructions.begin(), Instructions.end());
// Now that we've emitted all of the operand info that fits into OpcodeInfo, emit
// information for those instructions that are left. This is a less dense
// encoding, but we expect the main 32-bit table to handle the majority of
// instructions.
if (!Instructions.empty()) {
// Find the opcode # of inline asm.
O << " switch (MI->getOpcode()) {\n";
while (!Instructions.empty())
EmitInstructions(Instructions, O);
O << " }\n";
O << " return;\n";
}
O << "}\n";
}
static const char *getMinimalTypeForRange(uint64_t Range) {
assert(Range < 0xFFFFFFFFULL && "Enum too large");
if (Range > 0xFFFF)
return "uint32_t";
if (Range > 0xFF)
return "uint16_t";
return "uint8_t";
}
static void
emitRegisterNameString(raw_ostream &O, StringRef AltName,
const std::deque<CodeGenRegister> &Registers) {
SequenceToOffsetTable<std::string> StringTable;
SmallVector<std::string, 4> AsmNames(Registers.size());
unsigned i = 0;
for (const auto &Reg : Registers) {
std::string &AsmName = AsmNames[i++];
// "NoRegAltName" is special. We don't need to do a lookup for that,
// as it's just a reference to the default register name.
if (AltName == "" || AltName == "NoRegAltName") {
AsmName = Reg.TheDef->getValueAsString("AsmName");
if (AsmName.empty())
AsmName = Reg.getName();
} else {
// Make sure the register has an alternate name for this index.
std::vector<Record*> AltNameList =
Reg.TheDef->getValueAsListOfDefs("RegAltNameIndices");
unsigned Idx = 0, e;
for (e = AltNameList.size();
Idx < e && (AltNameList[Idx]->getName() != AltName);
++Idx)
;
// If the register has an alternate name for this index, use it.
// Otherwise, leave it empty as an error flag.
if (Idx < e) {
std::vector<std::string> AltNames =
Reg.TheDef->getValueAsListOfStrings("AltNames");
if (AltNames.size() <= Idx)
PrintFatalError(Reg.TheDef->getLoc(),
"Register definition missing alt name for '" +
AltName + "'.");
AsmName = AltNames[Idx];
}
}
StringTable.add(AsmName);
}
StringTable.layout();
O << " static const char AsmStrs" << AltName << "[] = {\n";
StringTable.emit(O, printChar);
O << " };\n\n";
O << " static const " << getMinimalTypeForRange(StringTable.size()-1)
<< " RegAsmOffset" << AltName << "[] = {";
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
if ((i % 14) == 0)
O << "\n ";
O << StringTable.get(AsmNames[i]) << ", ";
}
O << "\n };\n"
<< "\n";
}
void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
const auto &Registers = Target.getRegBank().getRegisters();
std::vector<Record*> AltNameIndices = Target.getRegAltNameIndices();
bool hasAltNames = AltNameIndices.size() > 1;
O <<
"\n\n/// getRegisterName - This method is automatically generated by tblgen\n"
"/// from the register set description. This returns the assembler name\n"
"/// for the specified register.\n"
"const char *" << Target.getName() << ClassName << "::";
if (hasAltNames)
O << "\ngetRegisterName(unsigned RegNo, unsigned AltIdx) {\n";
else
O << "getRegisterName(unsigned RegNo) {\n";
O << " assert(RegNo && RegNo < " << (Registers.size()+1)
<< " && \"Invalid register number!\");\n"
<< "\n";
if (hasAltNames) {
for (unsigned i = 0, e = AltNameIndices.size(); i < e; ++i)
emitRegisterNameString(O, AltNameIndices[i]->getName(), Registers);
} else
emitRegisterNameString(O, "", Registers);
if (hasAltNames) {
O << " switch(AltIdx) {\n"
<< " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
for (unsigned i = 0, e = AltNameIndices.size(); i < e; ++i) {
std::string Namespace = AltNameIndices[1]->getValueAsString("Namespace");
std::string AltName(AltNameIndices[i]->getName());
O << " case " << Namespace << "::" << AltName << ":\n"
<< " assert(*(AsmStrs" << AltName << "+RegAsmOffset"
<< AltName << "[RegNo-1]) &&\n"
<< " \"Invalid alt name index for register!\");\n"
<< " return AsmStrs" << AltName << "+RegAsmOffset"
<< AltName << "[RegNo-1];\n";
}
O << " }\n";
} else {
O << " assert (*(AsmStrs+RegAsmOffset[RegNo-1]) &&\n"
<< " \"Invalid alt name index for register!\");\n"
<< " return AsmStrs+RegAsmOffset[RegNo-1];\n";
}
O << "}\n";
}
namespace {
// IAPrinter - Holds information about an InstAlias. Two InstAliases match if
// they both have the same conditionals, in which case we cannot print out the
// alias for that pattern.
class IAPrinter {
std::vector<std::string> Conds;
std::map<StringRef, std::pair<int, int>> OpMap;
SmallVector<Record*, 4> ReqFeatures;
std::string Result;
std::string AsmString;
public:
IAPrinter(std::string R, std::string AS) : Result(R), AsmString(AS) {}
void addCond(const std::string &C) { Conds.push_back(C); }
void addOperand(StringRef Op, int OpIdx, int PrintMethodIdx = -1) {
assert(OpIdx >= 0 && OpIdx < 0xFE && "Idx out of range");
assert(PrintMethodIdx >= -1 && PrintMethodIdx < 0xFF &&
"Idx out of range");
OpMap[Op] = std::make_pair(OpIdx, PrintMethodIdx);
}
bool isOpMapped(StringRef Op) { return OpMap.find(Op) != OpMap.end(); }
int getOpIndex(StringRef Op) { return OpMap[Op].first; }
std::pair<int, int> &getOpData(StringRef Op) { return OpMap[Op]; }
std::pair<StringRef, StringRef::iterator> parseName(StringRef::iterator Start,
StringRef::iterator End) {
StringRef::iterator I = Start;
StringRef::iterator Next;
if (*I == '{') {
// ${some_name}
Start = ++I;
while (I != End && *I != '}')
++I;
Next = I;
// eat the final '}'
if (Next != End)
++Next;
} else {
// $name, just eat the usual suspects.
while (I != End &&
((*I >= 'a' && *I <= 'z') || (*I >= 'A' && *I <= 'Z') ||
(*I >= '0' && *I <= '9') || *I == '_'))
++I;
Next = I;
}
return std::make_pair(StringRef(Start, I - Start), Next);
}
void print(raw_ostream &O) {
if (Conds.empty() && ReqFeatures.empty()) {
O.indent(6) << "return true;\n";
return;
}
O << "if (";
for (std::vector<std::string>::iterator
I = Conds.begin(), E = Conds.end(); I != E; ++I) {
if (I != Conds.begin()) {
O << " &&\n";
O.indent(8);
}
O << *I;
}
O << ") {\n";
O.indent(6) << "// " << Result << "\n";
// Directly mangle mapped operands into the string. Each operand is
// identified by a '$' sign followed by a byte identifying the number of the
// operand. We add one to the index to avoid zero bytes.
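    // Hedged example (added for illustration, not part of the original
    // source): with "$dst" mapped to MCInst operand 0 and no custom
    // PrintMethod, the alias string "mov $dst" is mangled to "mov \x01";
    // if operand 3 instead needed print method index 2, the emitted bytes
    // would be "\xFF\x04\x03".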
StringRef ASM(AsmString);
SmallString<128> OutString;
raw_svector_ostream OS(OutString);
for (StringRef::iterator I = ASM.begin(), E = ASM.end(); I != E;) {
OS << *I;
if (*I == '$') {
StringRef Name;
std::tie(Name, I) = parseName(++I, E);
assert(isOpMapped(Name) && "Unmapped operand!");
int OpIndex, PrintIndex;
std::tie(OpIndex, PrintIndex) = getOpData(Name);
if (PrintIndex == -1) {
// Can use the default printOperand route.
OS << format("\\x%02X", (unsigned char)OpIndex + 1);
} else
// 3 bytes if a PrintMethod is needed: 0xFF, the MCInst operand
// number, and which of our pre-detected Methods to call.
OS << format("\\xFF\\x%02X\\x%02X", OpIndex + 1, PrintIndex + 1);
} else {
++I;
}
}
OS.flush();
// Emit the string.
O.indent(6) << "AsmString = \"" << OutString << "\";\n";
O.indent(6) << "break;\n";
O.indent(4) << '}';
}
bool operator==(const IAPrinter &RHS) const {
if (Conds.size() != RHS.Conds.size())
return false;
unsigned Idx = 0;
for (const auto &str : Conds)
if (str != RHS.Conds[Idx++])
return false;
return true;
}
};
} // end anonymous namespace
static unsigned CountNumOperands(StringRef AsmString, unsigned Variant) {
std::string FlatAsmString =
CodeGenInstruction::FlattenAsmStringVariants(AsmString, Variant);
AsmString = FlatAsmString;
return AsmString.count(' ') + AsmString.count('\t');
}
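// Note added for illustration (not in the original source): operands in the
// flattened assembly string are separated by spaces or tabs, so a string such
// as "add $dst, $src1, $src2" makes CountNumOperands return 3.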
namespace {
struct AliasPriorityComparator {
typedef std::pair<CodeGenInstAlias *, int> ValueType;
bool operator()(const ValueType &LHS, const ValueType &RHS) const {
if (LHS.second == RHS.second) {
// We don't actually care about the order, but for consistency it
// shouldn't depend on pointer comparisons.
return LHS.first->TheDef->getName() < RHS.first->TheDef->getName();
}
// Aliases with larger priorities should be considered first.
return LHS.second > RHS.second;
}
};
}
void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
O << "\n#ifdef PRINT_ALIAS_INSTR\n";
O << "#undef PRINT_ALIAS_INSTR\n\n";
//////////////////////////////
// Gather information about aliases we need to print
//////////////////////////////
// Emit the method that prints the alias instruction.
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
unsigned Variant = AsmWriter->getValueAsInt("Variant");
unsigned PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
std::vector<Record*> AllInstAliases =
Records.getAllDerivedDefinitions("InstAlias");
// Create a map from the qualified name to a list of potential matches.
typedef std::set<std::pair<CodeGenInstAlias*, int>, AliasPriorityComparator>
AliasWithPriority;
std::map<std::string, AliasWithPriority> AliasMap;
for (std::vector<Record*>::iterator
I = AllInstAliases.begin(), E = AllInstAliases.end(); I != E; ++I) {
CodeGenInstAlias *Alias = new CodeGenInstAlias(*I, Variant, Target);
const Record *R = *I;
int Priority = R->getValueAsInt("EmitPriority");
if (Priority < 1)
continue; // Aliases with priority 0 are never emitted.
const DagInit *DI = R->getValueAsDag("ResultInst");
const DefInit *Op = cast<DefInit>(DI->getOperator());
AliasMap[getQualifiedName(Op->getDef())].insert(std::make_pair(Alias,
Priority));
}
// A map of which conditions need to be met for each instruction operand
// before it can be matched to the mnemonic.
std::map<std::string, std::vector<IAPrinter*> > IAPrinterMap;
// A list of MCOperandPredicates for all operands in use, and the reverse map
std::vector<const Record*> MCOpPredicates;
DenseMap<const Record*, unsigned> MCOpPredicateMap;
for (auto &Aliases : AliasMap) {
for (auto &Alias : Aliases.second) {
const CodeGenInstAlias *CGA = Alias.first;
unsigned LastOpNo = CGA->ResultInstOperandIndex.size();
unsigned NumResultOps =
CountNumOperands(CGA->ResultInst->AsmString, Variant);
// Don't emit the alias if it has more operands than what it's aliasing.
if (NumResultOps < CountNumOperands(CGA->AsmString, Variant))
continue;
IAPrinter *IAP = new IAPrinter(CGA->Result->getAsString(),
CGA->AsmString);
unsigned NumMIOps = 0;
for (auto &Operand : CGA->ResultOperands)
NumMIOps += Operand.getMINumOperands();
std::string Cond;
Cond = std::string("MI->getNumOperands() == ") + llvm::utostr(NumMIOps);
IAP->addCond(Cond);
bool CantHandle = false;
unsigned MIOpNum = 0;
for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
std::string Op = "MI->getOperand(" + llvm::utostr(MIOpNum) + ")";
const CodeGenInstAlias::ResultOperand &RO = CGA->ResultOperands[i];
switch (RO.Kind) {
case CodeGenInstAlias::ResultOperand::K_Record: {
const Record *Rec = RO.getRecord();
StringRef ROName = RO.getName();
int PrintMethodIdx = -1;
// These two may have a PrintMethod, which we want to record (if it's
// the first time we've seen it) and provide an index for the aliasing
// code to use.
if (Rec->isSubClassOf("RegisterOperand") ||
Rec->isSubClassOf("Operand")) {
std::string PrintMethod = Rec->getValueAsString("PrintMethod");
if (PrintMethod != "" && PrintMethod != "printOperand") {
PrintMethodIdx = std::find(PrintMethods.begin(),
PrintMethods.end(), PrintMethod) -
PrintMethods.begin();
if (static_cast<unsigned>(PrintMethodIdx) == PrintMethods.size())
PrintMethods.push_back(PrintMethod);
}
}
if (Rec->isSubClassOf("RegisterOperand"))
Rec = Rec->getValueAsDef("RegClass");
if (Rec->isSubClassOf("RegisterClass")) {
IAP->addCond(Op + ".isReg()");
if (!IAP->isOpMapped(ROName)) {
IAP->addOperand(ROName, MIOpNum, PrintMethodIdx);
Record *R = CGA->ResultOperands[i].getRecord();
if (R->isSubClassOf("RegisterOperand"))
R = R->getValueAsDef("RegClass");
Cond = std::string("MRI.getRegClass(") + Target.getName() + "::" +
R->getName() + "RegClassID)"
".contains(" + Op + ".getReg())";
} else {
Cond = Op + ".getReg() == MI->getOperand(" +
llvm::utostr(IAP->getOpIndex(ROName)) + ").getReg()";
}
} else {
// Assume all printable operands are desired for now. This can be
// overridden in the InstAlias instantiation if necessary.
IAP->addOperand(ROName, MIOpNum, PrintMethodIdx);
// There might be an additional predicate on the MCOperand
unsigned Entry = MCOpPredicateMap[Rec];
if (!Entry) {
if (!Rec->isValueUnset("MCOperandPredicate")) {
MCOpPredicates.push_back(Rec);
Entry = MCOpPredicates.size();
MCOpPredicateMap[Rec] = Entry;
} else
break; // No conditions on this operand at all
}
Cond = Target.getName() + ClassName + "ValidateMCOperand(" +
Op + ", " + llvm::utostr(Entry) + ")";
}
// for all subcases of ResultOperand::K_Record:
IAP->addCond(Cond);
break;
}
case CodeGenInstAlias::ResultOperand::K_Imm: {
// Just because the alias has an immediate result, doesn't mean the
// MCInst will. An MCExpr could be present, for example.
IAP->addCond(Op + ".isImm()");
Cond = Op + ".getImm() == "
+ llvm::utostr(CGA->ResultOperands[i].getImm());
IAP->addCond(Cond);
break;
}
case CodeGenInstAlias::ResultOperand::K_Reg:
// If this is zero_reg, something's playing tricks we're not
// equipped to handle.
if (!CGA->ResultOperands[i].getRegister()) {
CantHandle = true;
break;
}
Cond = Op + ".getReg() == " + Target.getName() +
"::" + CGA->ResultOperands[i].getRegister()->getName();
IAP->addCond(Cond);
break;
}
if (!IAP) break;
MIOpNum += RO.getMINumOperands();
}
if (CantHandle) continue;
IAPrinterMap[Aliases.first].push_back(IAP);
}
}
//////////////////////////////
// Write out the printAliasInstr function
//////////////////////////////
std::string Header;
raw_string_ostream HeaderO(Header);
HeaderO << "bool " << Target.getName() << ClassName
<< "::printAliasInstr(const MCInst"
<< " *MI, " << (PassSubtarget ? "const MCSubtargetInfo &STI, " : "")
<< "raw_ostream &OS) {\n";
std::string Cases;
raw_string_ostream CasesO(Cases);
for (std::map<std::string, std::vector<IAPrinter*> >::iterator
I = IAPrinterMap.begin(), E = IAPrinterMap.end(); I != E; ++I) {
std::vector<IAPrinter*> &IAPs = I->second;
std::vector<IAPrinter*> UniqueIAPs;
for (std::vector<IAPrinter*>::iterator
II = IAPs.begin(), IE = IAPs.end(); II != IE; ++II) {
IAPrinter *LHS = *II;
bool IsDup = false;
for (std::vector<IAPrinter*>::iterator
III = IAPs.begin(), IIE = IAPs.end(); III != IIE; ++III) {
IAPrinter *RHS = *III;
if (LHS != RHS && *LHS == *RHS) {
IsDup = true;
break;
}
}
if (!IsDup) UniqueIAPs.push_back(LHS);
}
if (UniqueIAPs.empty()) continue;
CasesO.indent(2) << "case " << I->first << ":\n";
for (std::vector<IAPrinter*>::iterator
II = UniqueIAPs.begin(), IE = UniqueIAPs.end(); II != IE; ++II) {
IAPrinter *IAP = *II;
CasesO.indent(4);
IAP->print(CasesO);
CasesO << '\n';
}
CasesO.indent(4) << "return false;\n";
}
if (CasesO.str().empty()) {
O << HeaderO.str();
O << " return false;\n";
O << "}\n\n";
O << "#endif // PRINT_ALIAS_INSTR\n";
return;
}
if (!MCOpPredicates.empty())
O << "static bool " << Target.getName() << ClassName
<< "ValidateMCOperand(\n"
<< " const MCOperand &MCOp, unsigned PredicateIndex);\n";
O << HeaderO.str();
O.indent(2) << "const char *AsmString;\n";
O.indent(2) << "switch (MI->getOpcode()) {\n";
O.indent(2) << "default: return false;\n";
O << CasesO.str();
O.indent(2) << "}\n\n";
// Code that prints the alias, replacing the operands with the ones from the
// MCInst.
O << " unsigned I = 0;\n";
O << " while (AsmString[I] != ' ' && AsmString[I] != '\t' &&\n";
O << " AsmString[I] != '\\0')\n";
O << " ++I;\n";
O << " OS << '\\t' << StringRef(AsmString, I);\n";
O << " if (AsmString[I] != '\\0') {\n";
O << " OS << '\\t';\n";
O << " do {\n";
O << " if (AsmString[I] == '$') {\n";
O << " ++I;\n";
O << " if (AsmString[I] == (char)0xff) {\n";
O << " ++I;\n";
O << " int OpIdx = AsmString[I++] - 1;\n";
O << " int PrintMethodIdx = AsmString[I++] - 1;\n";
O << " printCustomAliasOperand(MI, OpIdx, PrintMethodIdx, ";
O << (PassSubtarget ? "STI, " : "");
O << "OS);\n";
O << " } else\n";
O << " printOperand(MI, unsigned(AsmString[I++]) - 1, ";
O << (PassSubtarget ? "STI, " : "");
O << "OS);\n";
O << " } else {\n";
O << " OS << AsmString[I++];\n";
O << " }\n";
O << " } while (AsmString[I] != '\\0');\n";
O << " }\n\n";
O << " return true;\n";
O << "}\n\n";
//////////////////////////////
// Write out the printCustomAliasOperand function
//////////////////////////////
O << "void " << Target.getName() << ClassName << "::"
<< "printCustomAliasOperand(\n"
<< " const MCInst *MI, unsigned OpIdx,\n"
<< " unsigned PrintMethodIdx,\n"
<< (PassSubtarget ? " const MCSubtargetInfo &STI,\n" : "")
<< " raw_ostream &OS) {\n";
if (PrintMethods.empty())
O << " llvm_unreachable(\"Unknown PrintMethod kind\");\n";
else {
O << " switch (PrintMethodIdx) {\n"
<< " default:\n"
<< " llvm_unreachable(\"Unknown PrintMethod kind\");\n"
<< " break;\n";
for (unsigned i = 0; i < PrintMethods.size(); ++i) {
O << " case " << i << ":\n"
<< " " << PrintMethods[i] << "(MI, OpIdx, "
<< (PassSubtarget ? "STI, " : "") << "OS);\n"
<< " break;\n";
}
O << " }\n";
}
O << "}\n\n";
if (!MCOpPredicates.empty()) {
O << "static bool " << Target.getName() << ClassName
<< "ValidateMCOperand(\n"
<< " const MCOperand &MCOp, unsigned PredicateIndex) {\n"
<< " switch (PredicateIndex) {\n"
<< " default:\n"
<< " llvm_unreachable(\"Unknown MCOperandPredicate kind\");\n"
<< " break;\n";
for (unsigned i = 0; i < MCOpPredicates.size(); ++i) {
Init *MCOpPred = MCOpPredicates[i]->getValueInit("MCOperandPredicate");
if (StringInit *SI = dyn_cast<StringInit>(MCOpPred)) {
O << " case " << i + 1 << ": {\n"
<< SI->getValue() << "\n"
<< " }\n";
} else
llvm_unreachable("Unexpected MCOperandPredicate field!");
}
O << " }\n"
<< "}\n\n";
}
O << "#endif // PRINT_ALIAS_INSTR\n";
}
AsmWriterEmitter::AsmWriterEmitter(RecordKeeper &R) : Records(R), Target(R) {
Record *AsmWriter = Target.getAsmWriter();
for (const CodeGenInstruction *I : Target.instructions())
if (!I->AsmString.empty() && I->TheDef->getName() != "PHI")
Instructions.emplace_back(*I, AsmWriter->getValueAsInt("Variant"),
AsmWriter->getValueAsInt("PassSubtarget"));
// Get the instruction numbering.
NumberedInstructions = &Target.getInstructionsByEnumValue();
// Compute the CodeGenInstruction -> AsmWriterInst mapping. Note that not
// all machine instructions are necessarily being printed, so there may be
// target instructions not in this map.
for (unsigned i = 0, e = Instructions.size(); i != e; ++i)
CGIAWIMap.insert(std::make_pair(Instructions[i].CGI, &Instructions[i]));
}
void AsmWriterEmitter::run(raw_ostream &O) {
EmitPrintInstruction(O);
EmitGetRegisterName(O);
EmitPrintAliasInstruction(O);
}
namespace llvm {
void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS) {
emitSourceFileHeader("Assembly Writer Source Fragment", OS);
AsmWriterEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/OptParserEmitter.cpp | //===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/TableGen/Error.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <cctype>
#include <cstring>
#include <map>
using namespace llvm;
// Ordering on Info. The logic should match with the consumer-side function in
// llvm/Option/OptTable.h.
static int StrCmpOptionName(const char *A, const char *B) {
const char *X = A, *Y = B;
char a = tolower(*A), b = tolower(*B);
while (a == b) {
if (a == '\0')
return strcmp(A, B);
a = tolower(*++X);
b = tolower(*++Y);
}
if (a == '\0') // A is a prefix of B.
return 1;
if (b == '\0') // B is a prefix of A.
return -1;
// Otherwise lexicographic.
return (a < b) ? -1 : 1;
}
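// Illustrative examples (added, not in the original source): the comparison
// is case-insensitive and orders a name after anything it is a prefix of, so
// StrCmpOptionName("march", "mcpu") < 0, while
// StrCmpOptionName("foo", "FooBar") > 0 because "foo" is a case-insensitive
// prefix of "FooBar".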
// HLSL Change: changed calling convention to __cdecl
static int __cdecl CompareOptionRecords(Record *const *Av, Record *const *Bv) {
const Record *A = *Av;
const Record *B = *Bv;
// Sentinel options precede all others and are only ordered by precedence.
bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel");
bool BSent = B->getValueAsDef("Kind")->getValueAsBit("Sentinel");
if (ASent != BSent)
return ASent ? -1 : 1;
// Compare options by name, unless they are sentinels.
if (!ASent)
if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").c_str(),
B->getValueAsString("Name").c_str()))
return Cmp;
if (!ASent) {
std::vector<std::string> APrefixes = A->getValueAsListOfStrings("Prefixes");
std::vector<std::string> BPrefixes = B->getValueAsListOfStrings("Prefixes");
for (std::vector<std::string>::const_iterator APre = APrefixes.begin(),
AEPre = APrefixes.end(),
BPre = BPrefixes.begin(),
BEPre = BPrefixes.end();
APre != AEPre &&
BPre != BEPre;
++APre, ++BPre) {
if (int Cmp = StrCmpOptionName(APre->c_str(), BPre->c_str()))
return Cmp;
}
}
  // Then by the kind precedence.
int APrec = A->getValueAsDef("Kind")->getValueAsInt("Precedence");
int BPrec = B->getValueAsDef("Kind")->getValueAsInt("Precedence");
if (APrec == BPrec &&
A->getValueAsListOfStrings("Prefixes") ==
B->getValueAsListOfStrings("Prefixes")) {
PrintError(A->getLoc(), Twine("Option is equivalent to"));
PrintError(B->getLoc(), Twine("Other defined here"));
PrintFatalError("Equivalent Options found.");
}
return APrec < BPrec ? -1 : 1;
}
static const std::string getOptionName(const Record &R) {
// Use the record name unless EnumName is defined.
if (isa<UnsetInit>(R.getValueInit("EnumName")))
return R.getName();
return R.getValueAsString("EnumName");
}
static raw_ostream &write_cstring(raw_ostream &OS, llvm::StringRef Str) {
OS << '"';
OS.write_escaped(Str);
OS << '"';
return OS;
}
/// OptParserEmitter - This tablegen backend takes an input .td file
/// describing a list of options and emits a data structure for parsing and
/// working with those options when given an input command line.
namespace llvm {
void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
// Get the option groups and options.
const std::vector<Record*> &Groups =
Records.getAllDerivedDefinitions("OptionGroup");
std::vector<Record*> Opts = Records.getAllDerivedDefinitions("Option");
emitSourceFileHeader("Option Parsing Definitions", OS);
array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
// Generate prefix groups.
typedef SmallVector<SmallString<2>, 2> PrefixKeyT;
typedef std::map<PrefixKeyT, std::string> PrefixesT;
PrefixesT Prefixes;
Prefixes.insert(std::make_pair(PrefixKeyT(), "prefix_0"));
unsigned CurPrefix = 0;
for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
const Record &R = *Opts[i];
std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
PrefixKeyT prfkey(prf.begin(), prf.end());
unsigned NewPrefix = CurPrefix + 1;
if (Prefixes.insert(std::make_pair(prfkey, (Twine("prefix_") +
Twine(NewPrefix)).str())).second)
CurPrefix = NewPrefix;
}
// Dump prefixes.
OS << "/////////\n";
OS << "// Prefixes\n\n";
OS << "#ifdef PREFIX\n";
OS << "#define COMMA ,\n";
for (PrefixesT::const_iterator I = Prefixes.begin(), E = Prefixes.end();
I != E; ++I) {
OS << "PREFIX(";
// Prefix name.
OS << I->second;
// Prefix values.
OS << ", {";
for (PrefixKeyT::const_iterator PI = I->first.begin(),
PE = I->first.end(); PI != PE; ++PI) {
OS << "\"" << *PI << "\" COMMA ";
}
OS << "0})\n";
}
OS << "#undef COMMA\n";
OS << "#endif\n\n";
OS << "/////////\n";
OS << "// Groups\n\n";
OS << "#ifdef OPTION\n";
for (unsigned i = 0, e = Groups.size(); i != e; ++i) {
const Record &R = *Groups[i];
// Start a single option entry.
OS << "OPTION(";
// The option prefix;
OS << "0";
// The option string.
OS << ", \"" << R.getValueAsString("Name") << '"';
// The option identifier name.
OS << ", "<< getOptionName(R);
// The option kind.
OS << ", Group";
// The containing option group (if any).
OS << ", ";
if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
OS << getOptionName(*DI->getDef());
else
OS << "INVALID";
// The other option arguments (unused for groups).
OS << ", INVALID, 0, 0, 0";
// The option help text.
if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
OS << ",\n";
OS << " ";
write_cstring(OS, R.getValueAsString("HelpText"));
} else
OS << ", 0";
// The option meta-variable name (unused).
OS << ", 0)\n";
}
OS << "\n";
OS << "//////////\n";
OS << "// Options\n\n";
for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
const Record &R = *Opts[i];
// Start a single option entry.
OS << "OPTION(";
// The option prefix;
std::vector<std::string> prf = R.getValueAsListOfStrings("Prefixes");
OS << Prefixes[PrefixKeyT(prf.begin(), prf.end())] << ", ";
// The option string.
write_cstring(OS, R.getValueAsString("Name"));
// The option identifier name.
OS << ", "<< getOptionName(R);
// The option kind.
OS << ", " << R.getValueAsDef("Kind")->getValueAsString("Name");
// The containing option group (if any).
OS << ", ";
const ListInit *GroupFlags = nullptr;
if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
GroupFlags = DI->getDef()->getValueAsListInit("Flags");
OS << getOptionName(*DI->getDef());
} else
OS << "INVALID";
// The option alias (if any).
OS << ", ";
if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Alias")))
OS << getOptionName(*DI->getDef());
else
OS << "INVALID";
// The option alias arguments (if any).
// Emitted as a \0 separated list in a string, e.g. ["foo", "bar"]
// would become "foo\0bar\0". Note that the compiler adds an implicit
// terminating \0 at the end.
OS << ", ";
std::vector<std::string> AliasArgs = R.getValueAsListOfStrings("AliasArgs");
if (AliasArgs.size() == 0) {
OS << "0";
} else {
OS << "\"";
for (size_t i = 0, e = AliasArgs.size(); i != e; ++i)
OS << AliasArgs[i] << "\\0";
OS << "\"";
}
// The option flags.
OS << ", ";
int NumFlags = 0;
const ListInit *LI = R.getValueAsListInit("Flags");
for (Init *I : *LI)
OS << (NumFlags++ ? " | " : "")
<< cast<DefInit>(I)->getDef()->getName();
if (GroupFlags) {
for (Init *I : *GroupFlags)
OS << (NumFlags++ ? " | " : "")
<< cast<DefInit>(I)->getDef()->getName();
}
if (NumFlags == 0)
OS << '0';
// The option parameter field.
OS << ", " << R.getValueAsInt("NumArgs");
// The option help text.
if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
OS << ",\n";
OS << " ";
write_cstring(OS, R.getValueAsString("HelpText"));
} else
OS << ", 0";
// The option meta-variable name.
OS << ", ";
if (!isa<UnsetInit>(R.getValueInit("MetaVarName")))
write_cstring(OS, R.getValueAsString("MetaVarName"));
else
OS << "0";
OS << ")\n";
}
OS << "#endif\n";
}
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CMakeLists.txt | set(LLVM_LINK_COMPONENTS Support MSSupport)
set (HLSL_IGNORE_SOURCES
X86DisassemblerTables.cpp
X86ModRMFilters.cpp
X86RecognizableInstr.cpp
)
add_tablegen(llvm-tblgen LLVM
AsmMatcherEmitter.cpp
AsmWriterEmitter.cpp
AsmWriterInst.cpp
CallingConvEmitter.cpp
CodeEmitterGen.cpp
CodeGenDAGPatterns.cpp
CodeGenInstruction.cpp
CodeGenMapTable.cpp
CodeGenRegisters.cpp
CodeGenSchedule.cpp
CodeGenTarget.cpp
DAGISelEmitter.cpp
DAGISelMatcherEmitter.cpp
DAGISelMatcherGen.cpp
DAGISelMatcherOpt.cpp
DAGISelMatcher.cpp
DFAPacketizerEmitter.cpp
DisassemblerEmitter.cpp
FastISelEmitter.cpp
FixedLenDecoderEmitter.cpp
InstrInfoEmitter.cpp
IntrinsicEmitter.cpp
OptParserEmitter.cpp
PseudoLoweringEmitter.cpp
RegisterInfoEmitter.cpp
SubtargetEmitter.cpp
TableGen.cpp
CTagsEmitter.cpp
)
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/FixedLenDecoderEmitter.cpp | //===------------ FixedLenDecoderEmitter.cpp - Decoder Generator ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// It contains the tablegen backend that emits the decoder functions for
// targets with a fixed-length instruction set.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <map>
#include <string>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "decoder-emitter"
namespace {
struct EncodingField {
unsigned Base, Width, Offset;
EncodingField(unsigned B, unsigned W, unsigned O)
: Base(B), Width(W), Offset(O) { }
};
struct OperandInfo {
std::vector<EncodingField> Fields;
std::string Decoder;
OperandInfo(std::string D)
: Decoder(D) { }
void addField(unsigned Base, unsigned Width, unsigned Offset) {
Fields.push_back(EncodingField(Base, Width, Offset));
}
unsigned numFields() const { return Fields.size(); }
typedef std::vector<EncodingField>::const_iterator const_iterator;
const_iterator begin() const { return Fields.begin(); }
const_iterator end() const { return Fields.end(); }
};
typedef std::vector<uint8_t> DecoderTable;
typedef uint32_t DecoderFixup;
typedef std::vector<DecoderFixup> FixupList;
typedef std::vector<FixupList> FixupScopeList;
typedef SetVector<std::string> PredicateSet;
typedef SetVector<std::string> DecoderSet;
struct DecoderTableInfo {
DecoderTable Table;
FixupScopeList FixupStack;
PredicateSet Predicates;
DecoderSet Decoders;
};
} // End anonymous namespace
namespace {
class FixedLenDecoderEmitter {
const std::vector<const CodeGenInstruction*> *NumberedInstructions;
public:
// Defaults preserved here for documentation, even though they aren't
// strictly necessary given the way that this is currently being called.
FixedLenDecoderEmitter(RecordKeeper &R,
std::string PredicateNamespace,
std::string GPrefix = "if (",
std::string GPostfix = " == MCDisassembler::Fail)"
" return MCDisassembler::Fail;",
std::string ROK = "MCDisassembler::Success",
std::string RFail = "MCDisassembler::Fail",
std::string L = "") :
Target(R),
PredicateNamespace(PredicateNamespace),
GuardPrefix(GPrefix), GuardPostfix(GPostfix),
ReturnOK(ROK), ReturnFail(RFail), Locals(L) {}
// Emit the decoder state machine table.
void emitTable(formatted_raw_ostream &o, DecoderTable &Table,
unsigned Indentation, unsigned BitWidth,
StringRef Namespace) const;
void emitPredicateFunction(formatted_raw_ostream &OS,
PredicateSet &Predicates,
unsigned Indentation) const;
void emitDecoderFunction(formatted_raw_ostream &OS,
DecoderSet &Decoders,
unsigned Indentation) const;
// run - Output the code emitter
void run(raw_ostream &o);
private:
CodeGenTarget Target;
public:
std::string PredicateNamespace;
std::string GuardPrefix, GuardPostfix;
std::string ReturnOK, ReturnFail;
std::string Locals;
};
} // End anonymous namespace
// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
// for a bit value.
//
// BIT_UNFILTERED is used as the init value for a filter position. It is used
// only for filter processing.
typedef enum {
BIT_TRUE, // '1'
BIT_FALSE, // '0'
BIT_UNSET, // '?'
BIT_UNFILTERED // unfiltered
} bit_value_t;
static bool ValueSet(bit_value_t V) {
return (V == BIT_TRUE || V == BIT_FALSE);
}
static bool ValueNotSet(bit_value_t V) {
return (V == BIT_UNSET);
}
static int Value(bit_value_t V) {
return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
}
static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
if (BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
return bit->getValue() ? BIT_TRUE : BIT_FALSE;
// The bit is uninitialized.
return BIT_UNSET;
}
// Prints the bit value for each position.
static void dumpBits(raw_ostream &o, const BitsInit &bits) {
for (unsigned index = bits.getNumBits(); index > 0; --index) {
switch (bitFromBits(bits, index - 1)) {
case BIT_TRUE:
o << "1";
break;
case BIT_FALSE:
o << "0";
break;
case BIT_UNSET:
o << "_";
break;
default:
llvm_unreachable("unexpected return value from bitFromBits");
}
}
}
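// Added example (not in the original source): a TableGen field written as
// "let Inst{3-0} = {1, 0, ?, ?};" reads back through bitFromBits as BIT_TRUE,
// BIT_FALSE, BIT_UNSET, BIT_UNSET for bits 3..0, which dumpBits renders as
// "10__".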
static BitsInit &getBitsField(const Record &def, const char *str) {
BitsInit *bits = def.getValueAsBitsInit(str);
return *bits;
}
// Forward declaration.
namespace {
class FilterChooser;
} // End anonymous namespace
// Representation of the instruction to work on.
typedef std::vector<bit_value_t> insn_t;
/// Filter - Filter works with FilterChooser to produce the decoding tree for
/// the ISA.
///
/// It is useful to think of a Filter as governing the switch stmts of the
/// decoding tree in a certain level. Each case stmt delegates to an inferior
/// FilterChooser to decide what further decoding logic to employ, or in other
/// words, what other remaining bits to look at. The FilterChooser eventually
/// chooses a best Filter to do its job.
///
/// This recursive scheme ends when the number of Opcodes assigned to the
/// FilterChooser becomes 1 or if there is a conflict. A conflict happens when
/// the Filter/FilterChooser combo does not know how to distinguish among the
/// Opcodes assigned.
///
/// An example of a conflict is
///
/// Conflict:
/// 111101000.00........00010000....
/// 111101000.00........0001........
/// 1111010...00........0001........
/// 1111010...00....................
/// 1111010.........................
/// 1111............................
/// ................................
/// VST4q8a 111101000_00________00010000____
/// VST4q8b 111101000_00________00010000____
///
/// The Debug output shows the path that the decoding tree follows to reach the
/// conclusion that there is a conflict. VST4q8a is a vst4 to double-spaced
/// even registers, while VST4q8b is a vst4 to double-spaced odd registers.
///
/// The encoding info in the .td files does not specify this meta information,
/// which could have been used by the decoder to resolve the conflict. The
/// decoder could try to decode the even/odd register numbering and assign to
/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
/// version and returns the Opcode since the two have the same Asm format string.
namespace {
class Filter {
protected:
const FilterChooser *Owner;// points to the FilterChooser who owns this filter
unsigned StartBit; // the starting bit position
unsigned NumBits; // number of bits to filter
bool Mixed; // a mixed region contains both set and unset bits
// Map of well-known segment value to the set of uid's with that value.
std::map<uint64_t, std::vector<unsigned> > FilteredInstructions;
// Set of uid's with non-constant segment values.
std::vector<unsigned> VariableInstructions;
// Map of well-known segment value to its delegate.
std::map<unsigned, std::unique_ptr<const FilterChooser>> FilterChooserMap;
// Number of instructions which fall under FilteredInstructions category.
unsigned NumFiltered;
// Keeps track of the last opcode in the filtered bucket.
unsigned LastOpcFiltered;
public:
unsigned getNumFiltered() const { return NumFiltered; }
unsigned getSingletonOpc() const {
assert(NumFiltered == 1);
return LastOpcFiltered;
}
// Return the filter chooser for the group of instructions without constant
// segment values.
const FilterChooser &getVariableFC() const {
assert(NumFiltered == 1);
assert(FilterChooserMap.size() == 1);
return *(FilterChooserMap.find((unsigned)-1)->second);
}
Filter(Filter &&f);
Filter(FilterChooser &owner, unsigned startBit, unsigned numBits, bool mixed);
~Filter();
// Divides the decoding task into sub tasks and delegates them to the
// inferior FilterChooser's.
//
// A special case arises when there's only one entry in the filtered
// instructions. In order to unambiguously decode the singleton, we need to
// match the remaining undecoded encoding bits against the singleton.
void recurse();
// Emit table entries to decode instructions given a segment or segments of
// bits.
void emitTableEntry(DecoderTableInfo &TableInfo) const;
// Returns the number of fanout produced by the filter. More fanout implies
// the filter distinguishes more categories of instructions.
unsigned usefulness() const;
}; // End of class Filter
} // End anonymous namespace
// These are states of our finite state machines used in FilterChooser's
// filterProcessor() which produces the filter candidates to use.
typedef enum {
ATTR_NONE,
ATTR_FILTERED,
ATTR_ALL_SET,
ATTR_ALL_UNSET,
ATTR_MIXED
} bitAttr_t;
/// FilterChooser - FilterChooser chooses the best filter among a set of Filters
/// in order to perform the decoding of instructions at the current level.
///
/// Decoding proceeds from the top down. Based on the well-known encoding bits
/// of instructions available, FilterChooser builds up the possible Filters that
/// can further the task of decoding by distinguishing among the remaining
/// candidate instructions.
///
/// Once a filter has been chosen, it is called upon to divide the decoding task
/// into sub-tasks and delegates them to its inferior FilterChoosers for further
/// processing.
///
/// It is useful to think of a Filter as governing the switch stmts of the
/// decoding tree. And each case is delegated to an inferior FilterChooser to
/// decide what further remaining bits to look at.
namespace {
class FilterChooser {
protected:
friend class Filter;
// Vector of codegen instructions to choose our filter.
const std::vector<const CodeGenInstruction*> &AllInstructions;
// Vector of uid's for this filter chooser to work on.
const std::vector<unsigned> &Opcodes;
// Lookup table for the operand decoding of instructions.
const std::map<unsigned, std::vector<OperandInfo> > &Operands;
// Vector of candidate filters.
std::vector<Filter> Filters;
// Array of bit values passed down from our parent.
// Set to all BIT_UNFILTERED's for Parent == NULL.
std::vector<bit_value_t> FilterBitValues;
// Links to the FilterChooser above us in the decoding tree.
const FilterChooser *Parent;
// Index of the best filter from Filters.
int BestIndex;
// Width of instructions
unsigned BitWidth;
// Parent emitter
const FixedLenDecoderEmitter *Emitter;
FilterChooser(const FilterChooser &) = delete;
void operator=(const FilterChooser &) = delete;
public:
FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
const std::vector<unsigned> &IDs,
const std::map<unsigned, std::vector<OperandInfo> > &Ops,
unsigned BW,
const FixedLenDecoderEmitter *E)
: AllInstructions(Insts), Opcodes(IDs), Operands(Ops), Filters(),
FilterBitValues(BW, BIT_UNFILTERED), Parent(nullptr), BestIndex(-1),
BitWidth(BW), Emitter(E) {
doFilter();
}
FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
const std::vector<unsigned> &IDs,
const std::map<unsigned, std::vector<OperandInfo> > &Ops,
const std::vector<bit_value_t> &ParentFilterBitValues,
const FilterChooser &parent)
: AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
Filters(), FilterBitValues(ParentFilterBitValues),
Parent(&parent), BestIndex(-1), BitWidth(parent.BitWidth),
Emitter(parent.Emitter) {
doFilter();
}
unsigned getBitWidth() const { return BitWidth; }
protected:
// Populates the insn given the uid.
void insnWithID(insn_t &Insn, unsigned Opcode) const {
BitsInit &Bits = getBitsField(*AllInstructions[Opcode]->TheDef, "Inst");
// We may have a SoftFail bitmask, which specifies a mask where an encoding
// may differ from the value in "Inst" and yet still be valid, but the
// disassembler should return SoftFail instead of Success.
//
// This is used for marking UNPREDICTABLE instructions in the ARM world.
BitsInit *SFBits =
AllInstructions[Opcode]->TheDef->getValueAsBitsInit("SoftFail");
for (unsigned i = 0; i < BitWidth; ++i) {
if (SFBits && bitFromBits(*SFBits, i) == BIT_TRUE)
Insn.push_back(BIT_UNSET);
else
Insn.push_back(bitFromBits(Bits, i));
}
}
// Returns the record name.
const std::string &nameWithID(unsigned Opcode) const {
return AllInstructions[Opcode]->TheDef->getName();
}
// Populates the field of the insn given the start position and the number of
// consecutive bits to scan for.
//
// Returns false if there exists any uninitialized bit value in the range.
  // Returns true otherwise.
bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
unsigned NumBits) const;
/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
/// filter array as a series of chars.
void dumpFilterArray(raw_ostream &o,
const std::vector<bit_value_t> & filter) const;
/// dumpStack - dumpStack traverses the filter chooser chain and calls
/// dumpFilterArray on each filter chooser up to the top level one.
void dumpStack(raw_ostream &o, const char *prefix) const;
Filter &bestFilter() {
assert(BestIndex != -1 && "BestIndex not set");
return Filters[BestIndex];
}
// Called from Filter::recurse() when singleton exists. For debug purpose.
//void SingletonExists(unsigned Opc) const; // HLSL Change - Unused
bool PositionFiltered(unsigned i) const {
return ValueSet(FilterBitValues[i]);
}
// Calculates the island(s) needed to decode the instruction.
  // This returns a list of undecoded bits of an instruction, for example,
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
unsigned getIslands(std::vector<unsigned> &StartBits,
std::vector<unsigned> &EndBits,
std::vector<uint64_t> &FieldVals,
const insn_t &Insn) const;
// Emits code to check the Predicates member of an instruction are true.
// Returns true if predicate matches were emitted, false otherwise.
bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
unsigned Opc) const;
bool doesOpcodeNeedPredicate(unsigned Opc) const;
unsigned getPredicateIndex(DecoderTableInfo &TableInfo, StringRef P) const;
void emitPredicateTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const;
void emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const;
// Emits table entries to decode the singleton.
void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const;
// Emits code to decode the singleton, and then to decode the rest.
void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
const Filter &Best) const;
void emitBinaryParser(raw_ostream &o, unsigned &Indentation,
const OperandInfo &OpInfo) const;
void emitDecoder(raw_ostream &OS, unsigned Indentation, unsigned Opc) const;
unsigned getDecoderIndex(DecoderSet &Decoders, unsigned Opc) const;
// Assign a single filter and run with it.
void runSingleFilter(unsigned startBit, unsigned numBit, bool mixed);
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
bool AllowMixed);
// FilterProcessor scans the well-known encoding bits of the instructions and
// builds up a list of candidate filters. It chooses the best filter and
// recursively descends down the decoding tree.
bool filterProcessor(bool AllowMixed, bool Greedy = true);
// Decides on the best configuration of filter(s) to use in order to decode
// the instructions. A conflict of instructions may occur, in which case we
// dump the conflict set to the standard error.
void doFilter();
public:
// emitTableEntries - Emit state machine entries to decode our share of
// instructions.
void emitTableEntries(DecoderTableInfo &TableInfo) const;
};
} // End anonymous namespace
///////////////////////////
// //
// Filter Implementation //
// //
///////////////////////////
Filter::Filter(Filter &&f)
: Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
FilteredInstructions(std::move(f.FilteredInstructions)),
VariableInstructions(std::move(f.VariableInstructions)),
FilterChooserMap(std::move(f.FilterChooserMap)), NumFiltered(f.NumFiltered),
LastOpcFiltered(f.LastOpcFiltered) {
}
Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
bool mixed)
: Owner(&owner), StartBit(startBit), NumBits(numBits), Mixed(mixed) {
assert(StartBit + NumBits - 1 < Owner->BitWidth);
NumFiltered = 0;
LastOpcFiltered = 0;
for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
insn_t Insn;
// Populates the insn given the uid.
Owner->insnWithID(Insn, Owner->Opcodes[i]);
uint64_t Field;
// Scans the segment for possibly well-specified encoding bits.
bool ok = Owner->fieldFromInsn(Field, Insn, StartBit, NumBits);
if (ok) {
// The encoding bits are well-known. Lets add the uid of the
// instruction into the bucket keyed off the constant field value.
LastOpcFiltered = Owner->Opcodes[i];
FilteredInstructions[Field].push_back(LastOpcFiltered);
++NumFiltered;
} else {
// Some of the encoding bit(s) are unspecified. This contributes to
// one additional member of "Variable" instructions.
VariableInstructions.push_back(Owner->Opcodes[i]);
}
}
assert((FilteredInstructions.size() + VariableInstructions.size() > 0)
&& "Filter returns no instruction categories");
}
Filter::~Filter() {
}
// Divides the decoding task into sub tasks and delegates them to the
// inferior FilterChooser's.
//
// A special case arises when there's only one entry in the filtered
// instructions. In order to unambiguously decode the singleton, we need to
// match the remaining undecoded encoding bits against the singleton.
void Filter::recurse() {
// Starts by inheriting our parent filter chooser's filter bit values.
std::vector<bit_value_t> BitValueArray(Owner->FilterBitValues);
if (!VariableInstructions.empty()) {
// Conservatively marks each segment position as BIT_UNSET.
for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex)
BitValueArray[StartBit + bitIndex] = BIT_UNSET;
// Delegates to an inferior filter chooser for further processing on this
// group of instructions whose segment values are variable.
FilterChooserMap.insert(
std::make_pair(-1U, llvm::make_unique<FilterChooser>(
Owner->AllInstructions, VariableInstructions,
Owner->Operands, BitValueArray, *Owner)));
}
// No need to recurse for a singleton filtered instruction.
// See also Filter::emit*().
if (getNumFiltered() == 1) {
//Owner->SingletonExists(LastOpcFiltered);
assert(FilterChooserMap.size() == 1);
return;
}
// Otherwise, create sub choosers.
for (const auto &Inst : FilteredInstructions) {
// Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex) {
if (Inst.first & (1ULL << bitIndex))
BitValueArray[StartBit + bitIndex] = BIT_TRUE;
else
BitValueArray[StartBit + bitIndex] = BIT_FALSE;
}
// Delegates to an inferior filter chooser for further processing on this
// category of instructions.
FilterChooserMap.insert(std::make_pair(
Inst.first, llvm::make_unique<FilterChooser>(
Owner->AllInstructions, Inst.second,
Owner->Operands, BitValueArray, *Owner)));
}
}
static void resolveTableFixups(DecoderTable &Table, const FixupList &Fixups,
uint32_t DestIdx) {
// Any NumToSkip fixups in the current scope can resolve to the
// current location.
for (FixupList::const_reverse_iterator I = Fixups.rbegin(),
E = Fixups.rend();
I != E; ++I) {
// Calculate the distance from the byte following the fixup entry byte
// to the destination. The Target is calculated from after the 16-bit
// NumToSkip entry itself, so subtract two from the displacement here
// to account for that.
uint32_t FixupIdx = *I;
uint32_t Delta = DestIdx - FixupIdx - 2;
// Our NumToSkip entries are 16-bits. Make sure our table isn't too
// big.
assert(Delta < 65536U && "disassembler decoding table too large!");
Table[FixupIdx] = (uint8_t)Delta;
Table[FixupIdx + 1] = (uint8_t)(Delta >> 8);
}
}
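// Worked example (added, not in the original source): if a 16-bit NumToSkip
// placeholder was reserved at Table[40]/Table[41] and the scope resolves at
// DestIdx == 60, the stored delta is 60 - 40 - 2 == 18, written little-endian
// as Table[40] = 18 and Table[41] = 0.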
// Emit table entries to decode instructions given a segment or segments
// of bits.
void Filter::emitTableEntry(DecoderTableInfo &TableInfo) const {
TableInfo.Table.push_back(MCD::OPC_ExtractField);
TableInfo.Table.push_back(StartBit);
TableInfo.Table.push_back(NumBits);
// A new filter entry begins a new scope for fixup resolution.
TableInfo.FixupStack.emplace_back();
DecoderTable &Table = TableInfo.Table;
size_t PrevFilter = 0;
bool HasFallthrough = false;
for (auto &Filter : FilterChooserMap) {
// Field value -1 implies a non-empty set of variable instructions.
// See also recurse().
if (Filter.first == (unsigned)-1) {
HasFallthrough = true;
// Each scope should always have at least one filter value to check
// for.
assert(PrevFilter != 0 && "empty filter set!");
FixupList &CurScope = TableInfo.FixupStack.back();
// Resolve any NumToSkip fixups in the current scope.
resolveTableFixups(Table, CurScope, Table.size());
CurScope.clear();
PrevFilter = 0; // Don't re-process the filter's fallthrough.
} else {
Table.push_back(MCD::OPC_FilterValue);
// Encode and emit the value to filter against.
uint8_t Buffer[8];
unsigned Len = encodeULEB128(Filter.first, Buffer);
Table.insert(Table.end(), Buffer, Buffer + Len);
// Reserve space for the NumToSkip entry. We'll backpatch the value
// later.
PrevFilter = Table.size();
Table.push_back(0);
Table.push_back(0);
}
// We arrive at a category of instructions with the same segment value.
// Now delegate to the sub filter chooser for further decodings.
// The case may fallthrough, which happens if the remaining well-known
// encoding bits do not match exactly.
Filter.second->emitTableEntries(TableInfo);
// Now that we've emitted the body of the handler, update the NumToSkip
// of the filter itself to be able to skip forward when false. Subtract
// two as to account for the width of the NumToSkip field itself.
if (PrevFilter) {
uint32_t NumToSkip = Table.size() - PrevFilter - 2;
assert(NumToSkip < 65536U && "disassembler decoding table too large!");
Table[PrevFilter] = (uint8_t)NumToSkip;
Table[PrevFilter + 1] = (uint8_t)(NumToSkip >> 8);
}
}
// Any remaining unresolved fixups bubble up to the parent fixup scope.
assert(TableInfo.FixupStack.size() > 1 && "fixup stack underflow!");
FixupScopeList::iterator Source = TableInfo.FixupStack.end() - 1;
FixupScopeList::iterator Dest = Source - 1;
Dest->insert(Dest->end(), Source->begin(), Source->end());
TableInfo.FixupStack.pop_back();
// If there is no fallthrough, then the final filter should get fixed
// up according to the enclosing scope rather than the current position.
if (!HasFallthrough)
TableInfo.FixupStack.back().push_back(PrevFilter);
}
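// Added note (not in the original source): the filter value emitted above is
// ULEB128 encoded, so a segment value of 300 becomes the two bytes 0xAC 0x02,
// followed by the two-byte NumToSkip placeholder that is backpatched once the
// handler body has been emitted.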
// Returns the number of fanout produced by the filter. More fanout implies
// the filter distinguishes more categories of instructions.
unsigned Filter::usefulness() const {
if (!VariableInstructions.empty())
return FilteredInstructions.size();
else
return FilteredInstructions.size() + 1;
}
//////////////////////////////////
// //
// Filterchooser Implementation //
// //
//////////////////////////////////
// Emit the decoder state machine table.
void FixedLenDecoderEmitter::emitTable(formatted_raw_ostream &OS,
DecoderTable &Table,
unsigned Indentation,
unsigned BitWidth,
StringRef Namespace) const {
OS.indent(Indentation) << "static const uint8_t DecoderTable" << Namespace
<< BitWidth << "[] = {\n";
Indentation += 2;
// FIXME: We may be able to use the NumToSkip values to recover
// appropriate indentation levels.
DecoderTable::const_iterator I = Table.begin();
DecoderTable::const_iterator E = Table.end();
while (I != E) {
assert (I < E && "incomplete decode table entry!");
uint64_t Pos = I - Table.begin();
OS << "/* " << Pos << " */";
OS.PadToColumn(12);
switch (*I) {
default:
PrintFatalError("invalid decode table opcode");
case MCD::OPC_ExtractField: {
++I;
unsigned Start = *I++;
unsigned Len = *I++;
OS.indent(Indentation) << "MCD::OPC_ExtractField, " << Start << ", "
<< Len << ", // Inst{";
if (Len > 1)
OS << (Start + Len - 1) << "-";
OS << Start << "} ...\n";
break;
}
case MCD::OPC_FilterValue: {
++I;
OS.indent(Indentation) << "MCD::OPC_FilterValue, ";
// The filter value is ULEB128 encoded.
while (*I >= 128)
OS << utostr(*I++) << ", ";
OS << utostr(*I++) << ", ";
// 16-bit numtoskip value.
uint8_t Byte = *I++;
uint32_t NumToSkip = Byte;
OS << utostr(Byte) << ", ";
Byte = *I++;
OS << utostr(Byte) << ", ";
NumToSkip |= Byte << 8;
OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
break;
}
case MCD::OPC_CheckField: {
++I;
unsigned Start = *I++;
unsigned Len = *I++;
OS.indent(Indentation) << "MCD::OPC_CheckField, " << Start << ", "
<< Len << ", ";// << Val << ", " << NumToSkip << ",\n";
// ULEB128 encoded field value.
for (; *I >= 128; ++I)
OS << utostr(*I) << ", ";
OS << utostr(*I++) << ", ";
// 16-bit numtoskip value.
uint8_t Byte = *I++;
uint32_t NumToSkip = Byte;
OS << utostr(Byte) << ", ";
Byte = *I++;
OS << utostr(Byte) << ", ";
NumToSkip |= Byte << 8;
OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
break;
}
case MCD::OPC_CheckPredicate: {
++I;
OS.indent(Indentation) << "MCD::OPC_CheckPredicate, ";
for (; *I >= 128; ++I)
OS << utostr(*I) << ", ";
OS << utostr(*I++) << ", ";
// 16-bit numtoskip value.
uint8_t Byte = *I++;
uint32_t NumToSkip = Byte;
OS << utostr(Byte) << ", ";
Byte = *I++;
OS << utostr(Byte) << ", ";
NumToSkip |= Byte << 8;
OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
break;
}
case MCD::OPC_Decode: {
++I;
// Extract the ULEB128 encoded Opcode to a buffer.
uint8_t Buffer[8], *p = Buffer;
while ((*p++ = *I++) >= 128)
assert((p - Buffer) <= (ptrdiff_t)sizeof(Buffer)
&& "ULEB128 value too large!");
// Decode the Opcode value.
unsigned Opc = decodeULEB128(Buffer);
OS.indent(Indentation) << "MCD::OPC_Decode, ";
for (p = Buffer; *p >= 128; ++p)
OS << utostr(*p) << ", ";
OS << utostr(*p) << ", ";
// Decoder index.
for (; *I >= 128; ++I)
OS << utostr(*I) << ", ";
OS << utostr(*I++) << ", ";
OS << "// Opcode: "
<< NumberedInstructions->at(Opc)->TheDef->getName() << "\n";
break;
}
case MCD::OPC_SoftFail: {
++I;
OS.indent(Indentation) << "MCD::OPC_SoftFail";
// Positive mask
uint64_t Value = 0;
unsigned Shift = 0;
do {
OS << ", " << utostr(*I);
Value += (*I & 0x7f) << Shift;
Shift += 7;
} while (*I++ >= 128);
if (Value > 127)
OS << " /* 0x" << utohexstr(Value) << " */";
// Negative mask
Value = 0;
Shift = 0;
do {
OS << ", " << utostr(*I);
Value += (*I & 0x7f) << Shift;
Shift += 7;
} while (*I++ >= 128);
if (Value > 127)
OS << " /* 0x" << utohexstr(Value) << " */";
OS << ",\n";
break;
}
case MCD::OPC_Fail: {
++I;
OS.indent(Indentation) << "MCD::OPC_Fail,\n";
break;
}
}
}
OS.indent(Indentation) << "0\n";
Indentation -= 2;
OS.indent(Indentation) << "};\n\n";
}
void FixedLenDecoderEmitter::
emitPredicateFunction(formatted_raw_ostream &OS, PredicateSet &Predicates,
unsigned Indentation) const {
// The predicate function is just a big switch statement based on the
// input predicate index.
OS.indent(Indentation) << "static bool checkDecoderPredicate(unsigned Idx, "
<< "const FeatureBitset& Bits) {\n";
Indentation += 2;
if (!Predicates.empty()) {
OS.indent(Indentation) << "switch (Idx) {\n";
OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
unsigned Index = 0;
for (const auto &Predicate : Predicates) {
OS.indent(Indentation) << "case " << Index++ << ":\n";
OS.indent(Indentation+2) << "return (" << Predicate << ");\n";
}
OS.indent(Indentation) << "}\n";
} else {
// No case statement to emit
OS.indent(Indentation) << "llvm_unreachable(\"Invalid index!\");\n";
}
Indentation -= 2;
OS.indent(Indentation) << "}\n\n";
}
void FixedLenDecoderEmitter::
emitDecoderFunction(formatted_raw_ostream &OS, DecoderSet &Decoders,
unsigned Indentation) const {
// The decoder function is just a big switch statement based on the
// input decoder index.
OS.indent(Indentation) << "template<typename InsnType>\n";
OS.indent(Indentation) << "static DecodeStatus decodeToMCInst(DecodeStatus S,"
<< " unsigned Idx, InsnType insn, MCInst &MI,\n";
OS.indent(Indentation) << " uint64_t "
<< "Address, const void *Decoder) {\n";
Indentation += 2;
OS.indent(Indentation) << "InsnType tmp;\n";
OS.indent(Indentation) << "switch (Idx) {\n";
OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
unsigned Index = 0;
for (const auto &Decoder : Decoders) {
OS.indent(Indentation) << "case " << Index++ << ":\n";
OS << Decoder;
OS.indent(Indentation+2) << "return S;\n";
}
OS.indent(Indentation) << "}\n";
Indentation -= 2;
OS.indent(Indentation) << "}\n\n";
}
// Populates the field of the insn given the start position and the number of
// consecutive bits to scan for.
//
// Returns false upon encountering the first uninitialized bit value.
// Returns true otherwise.
bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
unsigned StartBit, unsigned NumBits) const {
Field = 0;
for (unsigned i = 0; i < NumBits; ++i) {
if (Insn[StartBit + i] == BIT_UNSET)
return false;
if (Insn[StartBit + i] == BIT_TRUE)
Field = Field | (1ULL << i);
}
return true;
}
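// Added example (not in the original source): for Insn bits {1, 0, 1, 1} at
// positions StartBit .. StartBit+3, fieldFromInsn assembles Field = 0b1101
// (13); the bit at StartBit lands in bit 0 of Field.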
/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
/// filter array as a series of chars.
void FilterChooser::dumpFilterArray(raw_ostream &o,
const std::vector<bit_value_t> &filter) const {
for (unsigned bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
switch (filter[bitIndex - 1]) {
case BIT_UNFILTERED:
o << ".";
break;
case BIT_UNSET:
o << "_";
break;
case BIT_TRUE:
o << "1";
break;
case BIT_FALSE:
o << "0";
break;
}
}
}
/// dumpStack - dumpStack traverses the filter chooser chain and calls
/// dumpFilterArray on each filter chooser up to the top level one.
void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) const {
const FilterChooser *current = this;
while (current) {
o << prefix;
dumpFilterArray(o, current->FilterBitValues);
o << '\n';
current = current->Parent;
}
}
#if 0 // HLSL Change Unused
// Called from Filter::recurse() when singleton exists. For debug purpose.
void FilterChooser::SingletonExists(unsigned Opc) const {
insn_t Insn0;
insnWithID(Insn0, Opc);
errs() << "Singleton exists: " << nameWithID(Opc)
<< " with its decoding dominating ";
for (unsigned i = 0; i < Opcodes.size(); ++i) {
if (Opcodes[i] == Opc) continue;
errs() << nameWithID(Opcodes[i]) << ' ';
}
errs() << '\n';
dumpStack(errs(), "\t\t");
for (unsigned i = 0; i < Opcodes.size(); ++i) {
const std::string &Name = nameWithID(Opcodes[i]);
errs() << '\t' << Name << " ";
dumpBits(errs(),
getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
errs() << '\n';
}
}
#endif // HLSL Change Ends - Unused
// Calculates the island(s) needed to decode the instruction.
// This returns a list of undecoded bits of an instruction, for example,
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
std::vector<unsigned> &EndBits,
std::vector<uint64_t> &FieldVals,
const insn_t &Insn) const {
unsigned Num, BitNo;
Num = BitNo = 0;
uint64_t FieldVal = 0;
// 0: Init
// 1: Water (the bit value does not affect decoding)
// 2: Island (well-known bit value needed for decoding)
int State = 0;
int Val = -1;
for (unsigned i = 0; i < BitWidth; ++i) {
Val = Value(Insn[i]);
bool Filtered = PositionFiltered(i);
switch (State) {
default: llvm_unreachable("Unreachable code!");
case 0:
case 1:
if (Filtered || Val == -1)
State = 1; // Still in Water
else {
State = 2; // Into the Island
BitNo = 0;
StartBits.push_back(i);
FieldVal = Val;
}
break;
case 2:
if (Filtered || Val == -1) {
State = 1; // Into the Water
EndBits.push_back(i - 1);
FieldVals.push_back(FieldVal);
++Num;
} else {
State = 2; // Still in Island
++BitNo;
FieldVal = FieldVal | Val << BitNo;
}
break;
}
}
// If we are still in Island after the loop, do some housekeeping.
if (State == 2) {
EndBits.push_back(BitWidth - 1);
FieldVals.push_back(FieldVal);
++Num;
}
assert(StartBits.size() == Num && EndBits.size() == Num &&
FieldVals.size() == Num);
return Num;
}
void FilterChooser::emitBinaryParser(raw_ostream &o, unsigned &Indentation,
const OperandInfo &OpInfo) const {
const std::string &Decoder = OpInfo.Decoder;
if (OpInfo.numFields() != 1)
o.indent(Indentation) << "tmp = 0;\n";
for (const EncodingField &EF : OpInfo) {
o.indent(Indentation) << "tmp ";
if (OpInfo.numFields() != 1) o << '|';
o << "= fieldFromInstruction"
<< "(insn, " << EF.Base << ", " << EF.Width << ')';
if (OpInfo.numFields() != 1 || EF.Offset != 0)
o << " << " << EF.Offset;
o << ";\n";
}
if (Decoder != "")
o.indent(Indentation) << Emitter->GuardPrefix << Decoder
<< "(MI, tmp, Address, Decoder)"
<< Emitter->GuardPostfix << "\n";
else
o.indent(Indentation) << "MI.addOperand(MCOperand::createImm(tmp));\n";
}
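// Illustrative sketch (hypothetical operand layout, not from the original
// source): an immediate operand split across Inst{15-12} (offset 4) and
// Inst{3-0} (offset 0) makes emitBinaryParser produce roughly:
//
//   tmp = 0;
//   tmp |= fieldFromInstruction(insn, 12, 4) << 4;
//   tmp |= fieldFromInstruction(insn, 0, 4) << 0;
//   MI.addOperand(MCOperand::createImm(tmp));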
void FilterChooser::emitDecoder(raw_ostream &OS, unsigned Indentation,
unsigned Opc) const {
for (const auto &Op : Operands.find(Opc)->second) {
// If a custom instruction decoder was specified, use that.
if (Op.numFields() == 0 && Op.Decoder.size()) {
OS.indent(Indentation) << Emitter->GuardPrefix << Op.Decoder
<< "(MI, insn, Address, Decoder)"
<< Emitter->GuardPostfix << "\n";
break;
}
emitBinaryParser(OS, Indentation, Op);
}
}
unsigned FilterChooser::getDecoderIndex(DecoderSet &Decoders,
unsigned Opc) const {
  // Build up the decoder string.
SmallString<256> Decoder;
// FIXME: emitDecoder() function can take a buffer directly rather than
// a stream.
raw_svector_ostream S(Decoder);
unsigned I = 4;
emitDecoder(S, I, Opc);
S.flush();
// Using the full decoder string as the key value here is a bit
// heavyweight, but is effective. If the string comparisons become a
// performance concern, we can implement a mangling of the predicate
// data easily enough with a map back to the actual string. That's
// overkill for now, though.
// Make sure the predicate is in the table.
Decoders.insert(StringRef(Decoder));
// Now figure out the index for when we write out the table.
DecoderSet::const_iterator P = std::find(Decoders.begin(),
Decoders.end(),
Decoder.str());
return (unsigned)(P - Decoders.begin());
}
static void emitSinglePredicateMatch(raw_ostream &o, StringRef str,
const std::string &PredicateNamespace) {
if (str[0] == '!')
o << "!Bits[" << PredicateNamespace << "::"
<< str.slice(1,str.size()) << "]";
else
o << "Bits[" << PredicateNamespace << "::" << str << "]";
}
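// For illustration (feature names are hypothetical), an AssemblerCondString of
// "ModeThumb,!FeatureFoo" is split on commas by emitPredicateMatch() below and
// becomes
//   Bits[NS::ModeThumb] && !Bits[NS::FeatureFoo]
// where NS is the emitter's PredicateNamespace.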
bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
unsigned Opc) const {
ListInit *Predicates =
AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
bool IsFirstEmission = true;
for (unsigned i = 0; i < Predicates->size(); ++i) {
Record *Pred = Predicates->getElementAsRecord(i);
if (!Pred->getValue("AssemblerMatcherPredicate"))
continue;
std::string P = Pred->getValueAsString("AssemblerCondString");
if (!P.length())
continue;
if (!IsFirstEmission)
o << " && ";
StringRef SR(P);
std::pair<StringRef, StringRef> pairs = SR.split(',');
while (pairs.second.size()) {
emitSinglePredicateMatch(o, pairs.first, Emitter->PredicateNamespace);
o << " && ";
pairs = pairs.second.split(',');
}
emitSinglePredicateMatch(o, pairs.first, Emitter->PredicateNamespace);
IsFirstEmission = false;
}
return !Predicates->empty();
}
bool FilterChooser::doesOpcodeNeedPredicate(unsigned Opc) const {
ListInit *Predicates =
AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
for (unsigned i = 0; i < Predicates->size(); ++i) {
Record *Pred = Predicates->getElementAsRecord(i);
if (!Pred->getValue("AssemblerMatcherPredicate"))
continue;
std::string P = Pred->getValueAsString("AssemblerCondString");
if (!P.length())
continue;
return true;
}
return false;
}
unsigned FilterChooser::getPredicateIndex(DecoderTableInfo &TableInfo,
StringRef Predicate) const {
// Using the full predicate string as the key value here is a bit
// heavyweight, but is effective. If the string comparisons become a
// performance concern, we can implement a mangling of the predicate
// data easily enough with a map back to the actual string. That's
// overkill for now, though.
// Make sure the predicate is in the table.
TableInfo.Predicates.insert(Predicate.str());
// Now figure out the index for when we write out the table.
PredicateSet::const_iterator P = std::find(TableInfo.Predicates.begin(),
TableInfo.Predicates.end(),
Predicate.str());
return (unsigned)(P - TableInfo.Predicates.begin());
}
void FilterChooser::emitPredicateTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const {
if (!doesOpcodeNeedPredicate(Opc))
return;
// Build up the predicate string.
SmallString<256> Predicate;
// FIXME: emitPredicateMatch() functions can take a buffer directly rather
// than a stream.
raw_svector_ostream PS(Predicate);
unsigned I = 0;
emitPredicateMatch(PS, I, Opc);
// Figure out the index into the predicate table for the predicate just
// computed.
unsigned PIdx = getPredicateIndex(TableInfo, PS.str());
SmallString<16> PBytes;
raw_svector_ostream S(PBytes);
encodeULEB128(PIdx, S);
S.flush();
TableInfo.Table.push_back(MCD::OPC_CheckPredicate);
// Predicate index
for (unsigned i = 0, e = PBytes.size(); i != e; ++i)
TableInfo.Table.push_back(PBytes[i]);
// Push location for NumToSkip backpatching.
TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
TableInfo.Table.push_back(0);
TableInfo.Table.push_back(0);
}
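// The entry emitted above has the layout
//   OPC_CheckPredicate, <predicate index, ULEB128>, <NumToSkip, 16-bit LE>
// where the two NumToSkip bytes are written as zero for now and backpatched by
// resolveTableFixups() once the skip destination is known.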
void FilterChooser::emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const {
BitsInit *SFBits =
AllInstructions[Opc]->TheDef->getValueAsBitsInit("SoftFail");
if (!SFBits) return;
BitsInit *InstBits = AllInstructions[Opc]->TheDef->getValueAsBitsInit("Inst");
APInt PositiveMask(BitWidth, 0ULL);
APInt NegativeMask(BitWidth, 0ULL);
for (unsigned i = 0; i < BitWidth; ++i) {
bit_value_t B = bitFromBits(*SFBits, i);
bit_value_t IB = bitFromBits(*InstBits, i);
if (B != BIT_TRUE) continue;
switch (IB) {
case BIT_FALSE:
// The bit is meant to be false, so emit a check to see if it is true.
PositiveMask.setBit(i);
break;
case BIT_TRUE:
// The bit is meant to be true, so emit a check to see if it is false.
NegativeMask.setBit(i);
break;
default:
// The bit is not set; this must be an error!
StringRef Name = AllInstructions[Opc]->TheDef->getName();
errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in " << Name
<< " is set but Inst{" << i << "} is unset!\n"
<< " - You can only mark a bit as SoftFail if it is fully defined"
<< " (1/0 - not '?') in Inst\n";
return;
}
}
bool NeedPositiveMask = PositiveMask.getBoolValue();
bool NeedNegativeMask = NegativeMask.getBoolValue();
if (!NeedPositiveMask && !NeedNegativeMask)
return;
TableInfo.Table.push_back(MCD::OPC_SoftFail);
SmallString<16> MaskBytes;
raw_svector_ostream S(MaskBytes);
if (NeedPositiveMask) {
encodeULEB128(PositiveMask.getZExtValue(), S);
S.flush();
for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
TableInfo.Table.push_back(MaskBytes[i]);
} else
TableInfo.Table.push_back(0);
if (NeedNegativeMask) {
MaskBytes.clear();
S.resync();
encodeULEB128(NegativeMask.getZExtValue(), S);
S.flush();
for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
TableInfo.Table.push_back(MaskBytes[i]);
} else
TableInfo.Table.push_back(0);
}
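// The resulting entry has the layout
//   OPC_SoftFail, <positive mask, ULEB128>, <negative mask, ULEB128>
// with a single zero byte standing in for any mask that is not needed.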
// Emits table entries to decode the singleton.
void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
unsigned Opc) const {
std::vector<unsigned> StartBits;
std::vector<unsigned> EndBits;
std::vector<uint64_t> FieldVals;
insn_t Insn;
insnWithID(Insn, Opc);
// Look for islands of undecoded bits of the singleton.
getIslands(StartBits, EndBits, FieldVals, Insn);
unsigned Size = StartBits.size();
// Emit the predicate table entry if one is needed.
emitPredicateTableEntry(TableInfo, Opc);
// Check any additional encoding fields needed.
for (unsigned I = Size; I != 0; --I) {
unsigned NumBits = EndBits[I-1] - StartBits[I-1] + 1;
TableInfo.Table.push_back(MCD::OPC_CheckField);
TableInfo.Table.push_back(StartBits[I-1]);
TableInfo.Table.push_back(NumBits);
uint8_t Buffer[8], *p;
encodeULEB128(FieldVals[I-1], Buffer);
for (p = Buffer; *p >= 128 ; ++p)
TableInfo.Table.push_back(*p);
TableInfo.Table.push_back(*p);
// Push location for NumToSkip backpatching.
TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
// The fixup is always 16-bits, so go ahead and allocate the space
// in the table so all our relative position calculations work OK even
// before we fully resolve the real value here.
TableInfo.Table.push_back(0);
TableInfo.Table.push_back(0);
}
// Check for soft failure of the match.
emitSoftFailTableEntry(TableInfo, Opc);
TableInfo.Table.push_back(MCD::OPC_Decode);
uint8_t Buffer[8], *p;
encodeULEB128(Opc, Buffer);
for (p = Buffer; *p >= 128 ; ++p)
TableInfo.Table.push_back(*p);
TableInfo.Table.push_back(*p);
unsigned DIdx = getDecoderIndex(TableInfo.Decoders, Opc);
SmallString<16> Bytes;
raw_svector_ostream S(Bytes);
encodeULEB128(DIdx, S);
S.flush();
// Decoder index
for (unsigned i = 0, e = Bytes.size(); i != e; ++i)
TableInfo.Table.push_back(Bytes[i]);
}
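// Taken together, a singleton's table entry consists of an optional
// OPC_CheckPredicate, one OPC_CheckField per island (emitted highest island
// first), an optional OPC_SoftFail, and a terminating OPC_Decode carrying the
// ULEB128-encoded opcode and decoder index.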
// Emits table entries to decode the singleton, and then to decode the rest.
void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
const Filter &Best) const {
unsigned Opc = Best.getSingletonOpc();
// Complex singletons need predicate checks from the first singleton
// to refer forward to the variable FilterChooser that follows.
TableInfo.FixupStack.emplace_back();
emitSingletonTableEntry(TableInfo, Opc);
resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
TableInfo.Table.size());
TableInfo.FixupStack.pop_back();
Best.getVariableFC().emitTableEntries(TableInfo);
}
// Assign a single filter and run with it. Top level API client can initialize
// with a single filter to start the filtering process.
void FilterChooser::runSingleFilter(unsigned startBit, unsigned numBit,
bool mixed) {
Filters.clear();
Filters.emplace_back(*this, startBit, numBit, true);
BestIndex = 0; // Sole Filter instance to choose from.
bestFilter().recurse();
}
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
unsigned BitIndex, bool AllowMixed) {
if (RA == ATTR_MIXED && AllowMixed)
Filters.emplace_back(*this, StartBit, BitIndex - StartBit, true);
else if (RA == ATTR_ALL_SET && !AllowMixed)
Filters.emplace_back(*this, StartBit, BitIndex - StartBit, false);
}
// FilterProcessor scans the well-known encoding bits of the instructions and
// builds up a list of candidate filters. It chooses the best filter and
// recursively descends the decoding tree.
bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
Filters.clear();
BestIndex = -1;
unsigned numInstructions = Opcodes.size();
assert(numInstructions && "Filter created with no instructions");
// No further filtering is necessary.
if (numInstructions == 1)
return true;
// Heuristics. See also doFilter()'s "Heuristics" comment when num of
// instructions is 3.
if (AllowMixed && !Greedy) {
assert(numInstructions == 3);
for (unsigned i = 0; i < Opcodes.size(); ++i) {
std::vector<unsigned> StartBits;
std::vector<unsigned> EndBits;
std::vector<uint64_t> FieldVals;
insn_t Insn;
insnWithID(Insn, Opcodes[i]);
// Look for islands of undecoded bits of any instruction.
if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
// Found an instruction with island(s). Now just assign a filter.
runSingleFilter(StartBits[0], EndBits[0] - StartBits[0] + 1, true);
return true;
}
}
}
unsigned BitIndex;
// We maintain BIT_WIDTH copies of the bitAttrs automaton.
// The automaton consumes the corresponding bit from each
// instruction.
//
// Input symbols: 0, 1, and _ (unset).
// States: NONE, FILTERED, ALL_SET, ALL_UNSET, and MIXED.
// Initial state: NONE.
//
// (NONE) ------- [01] -> (ALL_SET)
// (NONE) ------- _ ----> (ALL_UNSET)
// (ALL_SET) ---- [01] -> (ALL_SET)
// (ALL_SET) ---- _ ----> (MIXED)
// (ALL_UNSET) -- [01] -> (MIXED)
// (ALL_UNSET) -- _ ----> (ALL_UNSET)
// (MIXED) ------ . ----> (MIXED)
// (FILTERED)---- . ----> (FILTERED)
std::vector<bitAttr_t> bitAttrs;
// FILTERED bit positions provide no entropy and are not worthy of pursuing.
// Filter::recurse() sets either BIT_TRUE or BIT_FALSE for each position.
for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex)
if (FilterBitValues[BitIndex] == BIT_TRUE ||
FilterBitValues[BitIndex] == BIT_FALSE)
bitAttrs.push_back(ATTR_FILTERED);
else
bitAttrs.push_back(ATTR_NONE);
for (unsigned InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
insn_t insn;
insnWithID(insn, Opcodes[InsnIndex]);
for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
switch (bitAttrs[BitIndex]) {
case ATTR_NONE:
if (insn[BitIndex] == BIT_UNSET)
bitAttrs[BitIndex] = ATTR_ALL_UNSET;
else
bitAttrs[BitIndex] = ATTR_ALL_SET;
break;
case ATTR_ALL_SET:
if (insn[BitIndex] == BIT_UNSET)
bitAttrs[BitIndex] = ATTR_MIXED;
break;
case ATTR_ALL_UNSET:
if (insn[BitIndex] != BIT_UNSET)
bitAttrs[BitIndex] = ATTR_MIXED;
break;
case ATTR_MIXED:
case ATTR_FILTERED:
break;
}
}
}
// The regionAttr automaton consumes the bitAttrs automatons' state,
// lowest-to-highest.
//
// Input symbols: F(iltered), (all_)S(et), (all_)U(nset), M(ixed)
// States: NONE, ALL_SET, MIXED
// Initial state: NONE
//
// (NONE) ----- F --> (NONE)
// (NONE) ----- S --> (ALL_SET) ; and set region start
// (NONE) ----- U --> (NONE)
// (NONE) ----- M --> (MIXED) ; and set region start
// (ALL_SET) -- F --> (NONE) ; and report an ALL_SET region
// (ALL_SET) -- S --> (ALL_SET)
// (ALL_SET) -- U --> (NONE) ; and report an ALL_SET region
// (ALL_SET) -- M --> (MIXED) ; and report an ALL_SET region
// (MIXED) ---- F --> (NONE) ; and report a MIXED region
// (MIXED) ---- S --> (ALL_SET) ; and report a MIXED region
// (MIXED) ---- U --> (NONE) ; and report a MIXED region
// (MIXED) ---- M --> (MIXED)
bitAttr_t RA = ATTR_NONE;
unsigned StartBit = 0;
for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
bitAttr_t bitAttr = bitAttrs[BitIndex];
assert(bitAttr != ATTR_NONE && "Bit without attributes");
switch (RA) {
case ATTR_NONE:
switch (bitAttr) {
case ATTR_FILTERED:
break;
case ATTR_ALL_SET:
StartBit = BitIndex;
RA = ATTR_ALL_SET;
break;
case ATTR_ALL_UNSET:
break;
case ATTR_MIXED:
StartBit = BitIndex;
RA = ATTR_MIXED;
break;
default:
llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_ALL_SET:
switch (bitAttr) {
case ATTR_FILTERED:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
RA = ATTR_NONE;
break;
case ATTR_ALL_SET:
break;
case ATTR_ALL_UNSET:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
RA = ATTR_NONE;
break;
case ATTR_MIXED:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
StartBit = BitIndex;
RA = ATTR_MIXED;
break;
default:
llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_MIXED:
switch (bitAttr) {
case ATTR_FILTERED:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
StartBit = BitIndex;
RA = ATTR_NONE;
break;
case ATTR_ALL_SET:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
StartBit = BitIndex;
RA = ATTR_ALL_SET;
break;
case ATTR_ALL_UNSET:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
RA = ATTR_NONE;
break;
case ATTR_MIXED:
break;
default:
llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_ALL_UNSET:
llvm_unreachable("regionAttr state machine has no ATTR_UNSET state");
case ATTR_FILTERED:
llvm_unreachable("regionAttr state machine has no ATTR_FILTERED state");
}
}
// At the end, if we're still in ALL_SET or MIXED states, report a region
switch (RA) {
case ATTR_NONE:
break;
case ATTR_FILTERED:
break;
case ATTR_ALL_SET:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
break;
case ATTR_ALL_UNSET:
break;
case ATTR_MIXED:
reportRegion(RA, StartBit, BitIndex, AllowMixed);
break;
}
// We have finished with the filter processing. Now it's time to choose
// the best-performing filter.
BestIndex = 0;
bool AllUseless = true;
unsigned BestScore = 0;
for (unsigned i = 0, e = Filters.size(); i != e; ++i) {
unsigned Usefulness = Filters[i].usefulness();
if (Usefulness)
AllUseless = false;
if (Usefulness > BestScore) {
BestIndex = i;
BestScore = Usefulness;
}
}
if (!AllUseless)
bestFilter().recurse();
return !AllUseless;
} // end of FilterChooser::filterProcessor(bool)
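// As a small illustration (hypothetical encodings): for the two 8-bit patterns
// 0b0000???? and 0b0001????, bits 7-4 are ATTR_ALL_SET and bits 3-0 are
// ATTR_ALL_UNSET, so the only region reported in the non-mixed pass is
// Inst{7-4}, which becomes the filter used to split the two instructions.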
// Decides on the best configuration of filter(s) to use in order to decode
// the instructions. A conflict of instructions may occur, in which case we
// dump the conflict set to the standard error.
void FilterChooser::doFilter() {
unsigned Num = Opcodes.size();
assert(Num && "FilterChooser created with no instructions");
// Try regions of consecutive known bit values first.
if (filterProcessor(false))
return;
// Then regions of mixed bits (both known and uninitialized bit values allowed).
if (filterProcessor(true))
return;
// Heuristics to cope with conflict set {t2CMPrs, t2SUBSrr, t2SUBSrs} where
// no single instruction for the maximum ATTR_MIXED region Inst{14-4} has a
// well-known encoding pattern. In such a case, we backtrack and scan for the
// very first consecutive ATTR_ALL_SET region and assign a filter to it.
if (Num == 3 && filterProcessor(true, false))
return;
// If we come to here, the instruction decoding has failed.
// Set the BestIndex to -1 to indicate so.
BestIndex = -1;
}
// emitTableEntries - Emit state machine entries to decode our share of
// instructions.
void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
if (Opcodes.size() == 1) {
// There is only one instruction in the set, which is great!
// Call emitSingletonTableEntry() to see whether there are any remaining
// encoding bits.
emitSingletonTableEntry(TableInfo, Opcodes[0]);
return;
}
// Choose the best filter to do the decoding!
if (BestIndex != -1) {
const Filter &Best = Filters[BestIndex];
if (Best.getNumFiltered() == 1)
emitSingletonTableEntry(TableInfo, Best);
else
Best.emitTableEntry(TableInfo);
return;
}
// We don't know how to decode these instructions! Dump the
// conflict set and bail.
// Print out useful conflict information for postmortem analysis.
errs() << "Decoding Conflict:\n";
dumpStack(errs(), "\t\t");
for (unsigned i = 0; i < Opcodes.size(); ++i) {
const std::string &Name = nameWithID(Opcodes[i]);
errs() << '\t' << Name << " ";
dumpBits(errs(),
getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
errs() << '\n';
}
}
static bool populateInstruction(CodeGenTarget &Target,
const CodeGenInstruction &CGI, unsigned Opc,
std::map<unsigned, std::vector<OperandInfo> > &Operands){
const Record &Def = *CGI.TheDef;
// If not all of the bit positions are specified, do not decode this instruction.
// We are bound to fail! For proper disassembly, the well-known encoding bits
// of the instruction must be fully specified.
BitsInit &Bits = getBitsField(Def, "Inst");
if (Bits.allInComplete()) return false;
std::vector<OperandInfo> InsnOperands;
// If the instruction has specified a custom decoding hook, use that instead
// of trying to auto-generate the decoder.
std::string InstDecoder = Def.getValueAsString("DecoderMethod");
if (InstDecoder != "") {
InsnOperands.push_back(OperandInfo(InstDecoder));
Operands[Opc] = InsnOperands;
return true;
}
// Generate a description of the operands of the instruction that we know
// how to decode automatically.
// FIXME: We'll need to have a way to manually override this as needed.
// Gather the outputs/inputs of the instruction, so we can find their
// positions in the encoding. This assumes for now that they appear in the
// MCInst in the order that they're listed.
std::vector<std::pair<Init*, std::string> > InOutOperands;
DagInit *Out = Def.getValueAsDag("OutOperandList");
DagInit *In = Def.getValueAsDag("InOperandList");
for (unsigned i = 0; i < Out->getNumArgs(); ++i)
InOutOperands.push_back(std::make_pair(Out->getArg(i), Out->getArgName(i)));
for (unsigned i = 0; i < In->getNumArgs(); ++i)
InOutOperands.push_back(std::make_pair(In->getArg(i), In->getArgName(i)));
// Search for tied operands, so that we can correctly instantiate
// operands that are not explicitly represented in the encoding.
std::map<std::string, std::string> TiedNames;
for (unsigned i = 0; i < CGI.Operands.size(); ++i) {
int tiedTo = CGI.Operands[i].getTiedRegister();
if (tiedTo != -1) {
std::pair<unsigned, unsigned> SO =
CGI.Operands.getSubOperandNumber(tiedTo);
TiedNames[InOutOperands[i].second] = InOutOperands[SO.first].second;
TiedNames[InOutOperands[SO.first].second] = InOutOperands[i].second;
}
}
std::map<std::string, std::vector<OperandInfo> > NumberedInsnOperands;
std::set<std::string> NumberedInsnOperandsNoTie;
if (Target.getInstructionSet()->
getValueAsBit("decodePositionallyEncodedOperands")) {
const std::vector<RecordVal> &Vals = Def.getValues();
unsigned NumberedOp = 0;
std::set<unsigned> NamedOpIndices;
if (Target.getInstructionSet()->
getValueAsBit("noNamedPositionallyEncodedOperands"))
// Collect the set of operand indices that might correspond to a named
// operand, and skip these when assigning operands based on position.
for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
unsigned OpIdx;
if (!CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
continue;
NamedOpIndices.insert(OpIdx);
}
for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
// Ignore fixed fields in the record, we're looking for values like:
// bits<5> RST = { ?, ?, ?, ?, ? };
if (Vals[i].getPrefix() || Vals[i].getValue()->isComplete())
continue;
// Determine if Vals[i] actually contributes to the Inst encoding.
unsigned bi = 0;
for (; bi < Bits.getNumBits(); ++bi) {
VarInit *Var = nullptr;
VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
if (BI)
Var = dyn_cast<VarInit>(BI->getBitVar());
else
Var = dyn_cast<VarInit>(Bits.getBit(bi));
if (Var && Var->getName() == Vals[i].getName())
break;
}
if (bi == Bits.getNumBits())
continue;
// Skip variables that correspond to explicitly-named operands.
unsigned OpIdx;
if (CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
continue;
// Get the bit range for this operand:
unsigned bitStart = bi++, bitWidth = 1;
for (; bi < Bits.getNumBits(); ++bi) {
VarInit *Var = nullptr;
VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
if (BI)
Var = dyn_cast<VarInit>(BI->getBitVar());
else
Var = dyn_cast<VarInit>(Bits.getBit(bi));
if (!Var)
break;
if (Var->getName() != Vals[i].getName())
break;
++bitWidth;
}
unsigned NumberOps = CGI.Operands.size();
while (NumberedOp < NumberOps &&
(CGI.Operands.isFlatOperandNotEmitted(NumberedOp) ||
(!NamedOpIndices.empty() && NamedOpIndices.count(
CGI.Operands.getSubOperandNumber(NumberedOp).first))))
++NumberedOp;
OpIdx = NumberedOp++;
// OpIdx now holds the ordered operand number of Vals[i].
std::pair<unsigned, unsigned> SO =
CGI.Operands.getSubOperandNumber(OpIdx);
const std::string &Name = CGI.Operands[SO.first].Name;
DEBUG(dbgs() << "Numbered operand mapping for " << Def.getName() << ": " <<
Name << "(" << SO.first << ", " << SO.second << ") => " <<
Vals[i].getName() << "\n");
std::string Decoder = "";
Record *TypeRecord = CGI.Operands[SO.first].Rec;
RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
StringInit *String = DecoderString ?
dyn_cast<StringInit>(DecoderString->getValue()) : nullptr;
if (String && String->getValue() != "")
Decoder = String->getValue();
if (Decoder == "" &&
CGI.Operands[SO.first].MIOperandInfo &&
CGI.Operands[SO.first].MIOperandInfo->getNumArgs()) {
Init *Arg = CGI.Operands[SO.first].MIOperandInfo->
getArg(SO.second);
if (TypedInit *TI = cast<TypedInit>(Arg)) {
RecordRecTy *Type = cast<RecordRecTy>(TI->getType());
TypeRecord = Type->getRecord();
}
}
bool isReg = false;
if (TypeRecord->isSubClassOf("RegisterOperand"))
TypeRecord = TypeRecord->getValueAsDef("RegClass");
if (TypeRecord->isSubClassOf("RegisterClass")) {
Decoder = "Decode" + TypeRecord->getName() + "RegisterClass";
isReg = true;
} else if (TypeRecord->isSubClassOf("PointerLikeRegClass")) {
Decoder = "DecodePointerLikeRegClass" +
utostr(TypeRecord->getValueAsInt("RegClassKind"));
isReg = true;
}
DecoderString = TypeRecord->getValue("DecoderMethod");
String = DecoderString ?
dyn_cast<StringInit>(DecoderString->getValue()) : nullptr;
if (!isReg && String && String->getValue() != "")
Decoder = String->getValue();
OperandInfo OpInfo(Decoder);
OpInfo.addField(bitStart, bitWidth, 0);
NumberedInsnOperands[Name].push_back(OpInfo);
// FIXME: For complex operands with custom decoders we can't handle tied
// sub-operands automatically. Skip those here and assume that this is
// fixed up elsewhere.
if (CGI.Operands[SO.first].MIOperandInfo &&
CGI.Operands[SO.first].MIOperandInfo->getNumArgs() > 1 &&
String && String->getValue() != "")
NumberedInsnOperandsNoTie.insert(Name);
}
}
// For each operand, see if we can figure out where it is encoded.
for (const auto &Op : InOutOperands) {
if (!NumberedInsnOperands[Op.second].empty()) {
InsnOperands.insert(InsnOperands.end(),
NumberedInsnOperands[Op.second].begin(),
NumberedInsnOperands[Op.second].end());
continue;
}
if (!NumberedInsnOperands[TiedNames[Op.second]].empty()) {
if (!NumberedInsnOperandsNoTie.count(TiedNames[Op.second])) {
// Figure out to which (sub)operand we're tied.
unsigned i = CGI.Operands.getOperandNamed(TiedNames[Op.second]);
int tiedTo = CGI.Operands[i].getTiedRegister();
if (tiedTo == -1) {
i = CGI.Operands.getOperandNamed(Op.second);
tiedTo = CGI.Operands[i].getTiedRegister();
}
if (tiedTo != -1) {
std::pair<unsigned, unsigned> SO =
CGI.Operands.getSubOperandNumber(tiedTo);
InsnOperands.push_back(NumberedInsnOperands[TiedNames[Op.second]]
[SO.second]);
}
}
continue;
}
std::string Decoder = "";
// At this point, we can locate the field, but we need to know how to
// interpret it. As a first step, require the target to provide callbacks
// for decoding register classes.
// FIXME: This needs to be extended to handle instructions with custom
// decoder methods, and operands with (simple) MIOperandInfo's.
TypedInit *TI = cast<TypedInit>(Op.first);
RecordRecTy *Type = cast<RecordRecTy>(TI->getType());
Record *TypeRecord = Type->getRecord();
bool isReg = false;
if (TypeRecord->isSubClassOf("RegisterOperand"))
TypeRecord = TypeRecord->getValueAsDef("RegClass");
if (TypeRecord->isSubClassOf("RegisterClass")) {
Decoder = "Decode" + TypeRecord->getName() + "RegisterClass";
isReg = true;
} else if (TypeRecord->isSubClassOf("PointerLikeRegClass")) {
Decoder = "DecodePointerLikeRegClass" +
utostr(TypeRecord->getValueAsInt("RegClassKind"));
isReg = true;
}
RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
StringInit *String = DecoderString ?
dyn_cast<StringInit>(DecoderString->getValue()) : nullptr;
if (!isReg && String && String->getValue() != "")
Decoder = String->getValue();
OperandInfo OpInfo(Decoder);
unsigned Base = ~0U;
unsigned Width = 0;
unsigned Offset = 0;
for (unsigned bi = 0; bi < Bits.getNumBits(); ++bi) {
VarInit *Var = nullptr;
VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
if (BI)
Var = dyn_cast<VarInit>(BI->getBitVar());
else
Var = dyn_cast<VarInit>(Bits.getBit(bi));
if (!Var) {
if (Base != ~0U) {
OpInfo.addField(Base, Width, Offset);
Base = ~0U;
Width = 0;
Offset = 0;
}
continue;
}
if (Var->getName() != Op.second &&
Var->getName() != TiedNames[Op.second]) {
if (Base != ~0U) {
OpInfo.addField(Base, Width, Offset);
Base = ~0U;
Width = 0;
Offset = 0;
}
continue;
}
if (Base == ~0U) {
Base = bi;
Width = 1;
Offset = BI ? BI->getBitNum() : 0;
} else if (BI && BI->getBitNum() != Offset + Width) {
OpInfo.addField(Base, Width, Offset);
Base = bi;
Width = 1;
Offset = BI->getBitNum();
} else {
++Width;
}
}
if (Base != ~0U)
OpInfo.addField(Base, Width, Offset);
if (OpInfo.numFields() > 0)
InsnOperands.push_back(OpInfo);
}
Operands[Opc] = InsnOperands;
#if 0
DEBUG({
// Dumps the instruction encoding bits.
dumpBits(errs(), Bits);
errs() << '\n';
// Dumps the list of operand info.
for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
const CGIOperandList::OperandInfo &Info = CGI.Operands[i];
const std::string &OperandName = Info.Name;
const Record &OperandDef = *Info.Rec;
errs() << "\t" << OperandName << " (" << OperandDef.getName() << ")\n";
}
});
#endif
return true;
}
// emitFieldFromInstruction - Emit the templated helper function
// fieldFromInstruction().
static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
OS << "// Helper function for extracting fields from encoded instructions.\n"
<< "template<typename InsnType>\n"
<< "static InsnType fieldFromInstruction(InsnType insn, unsigned startBit,\n"
<< " unsigned numBits) {\n"
<< " assert(startBit + numBits <= (sizeof(InsnType)*8) &&\n"
<< " \"Instruction field out of bounds!\");\n"
<< " InsnType fieldMask;\n"
<< " if (numBits == sizeof(InsnType)*8)\n"
<< " fieldMask = (InsnType)(-1LL);\n"
<< " else\n"
<< " fieldMask = (((InsnType)1 << numBits) - 1) << startBit;\n"
<< " return (insn & fieldMask) >> startBit;\n"
<< "}\n\n";
}
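// As a worked example of the emitted helper: fieldFromInstruction(0xE5, 2, 3)
// builds the mask ((1 << 3) - 1) << 2 = 0x1C and returns
// (0xE5 & 0x1C) >> 2 = 1, i.e. the value of bits Inst{4-2}.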
// emitDecodeInstruction - Emit the templated helper function
// decodeInstruction().
static void emitDecodeInstruction(formatted_raw_ostream &OS) {
OS << "template<typename InsnType>\n"
<< "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], MCInst &MI,\n"
<< " InsnType insn, uint64_t Address,\n"
<< " const void *DisAsm,\n"
<< " const MCSubtargetInfo &STI) {\n"
<< " const FeatureBitset& Bits = STI.getFeatureBits();\n"
<< "\n"
<< " const uint8_t *Ptr = DecodeTable;\n"
<< " uint32_t CurFieldValue = 0;\n"
<< " DecodeStatus S = MCDisassembler::Success;\n"
<< " for (;;) {\n"
<< " ptrdiff_t Loc = Ptr - DecodeTable;\n"
<< " switch (*Ptr) {\n"
<< " default:\n"
<< " errs() << Loc << \": Unexpected decode table opcode!\\n\";\n"
<< " return MCDisassembler::Fail;\n"
<< " case MCD::OPC_ExtractField: {\n"
<< " unsigned Start = *++Ptr;\n"
<< " unsigned Len = *++Ptr;\n"
<< " ++Ptr;\n"
<< " CurFieldValue = fieldFromInstruction(insn, Start, Len);\n"
<< " DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << \", \"\n"
<< " << Len << \"): \" << CurFieldValue << \"\\n\");\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_FilterValue: {\n"
<< " // Decode the field value.\n"
<< " unsigned Len;\n"
<< " InsnType Val = decodeULEB128(++Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " // NumToSkip is a plain 16-bit integer.\n"
<< " unsigned NumToSkip = *Ptr++;\n"
<< " NumToSkip |= (*Ptr++) << 8;\n"
<< "\n"
<< " // Perform the filter operation.\n"
<< " if (Val != CurFieldValue)\n"
<< " Ptr += NumToSkip;\n"
<< " DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << \", \" << NumToSkip\n"
<< " << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" : \"PASS:\")\n"
<< " << \" continuing at \" << (Ptr - DecodeTable) << \"\\n\");\n"
<< "\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_CheckField: {\n"
<< " unsigned Start = *++Ptr;\n"
<< " unsigned Len = *++Ptr;\n"
<< " InsnType FieldValue = fieldFromInstruction(insn, Start, Len);\n"
<< " // Decode the field value.\n"
<< " uint32_t ExpectedValue = decodeULEB128(++Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " // NumToSkip is a plain 16-bit integer.\n"
<< " unsigned NumToSkip = *Ptr++;\n"
<< " NumToSkip |= (*Ptr++) << 8;\n"
<< "\n"
<< " // If the actual and expected values don't match, skip.\n"
<< " if (ExpectedValue != FieldValue)\n"
<< " Ptr += NumToSkip;\n"
<< " DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << \", \"\n"
<< " << Len << \", \" << ExpectedValue << \", \" << NumToSkip\n"
<< " << \"): FieldValue = \" << FieldValue << \", ExpectedValue = \"\n"
<< " << ExpectedValue << \": \"\n"
<< " << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : \"FAIL\\n\"));\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_CheckPredicate: {\n"
<< " unsigned Len;\n"
<< " // Decode the Predicate Index value.\n"
<< " unsigned PIdx = decodeULEB128(++Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " // NumToSkip is a plain 16-bit integer.\n"
<< " unsigned NumToSkip = *Ptr++;\n"
<< " NumToSkip |= (*Ptr++) << 8;\n"
<< " // Check the predicate.\n"
<< " bool Pred;\n"
<< " if (!(Pred = checkDecoderPredicate(PIdx, Bits)))\n"
<< " Ptr += NumToSkip;\n"
<< " (void)Pred;\n"
<< " DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx << \"): \"\n"
<< " << (Pred ? \"PASS\\n\" : \"FAIL\\n\"));\n"
<< "\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_Decode: {\n"
<< " unsigned Len;\n"
<< " // Decode the Opcode value.\n"
<< " unsigned Opc = decodeULEB128(++Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " unsigned DecodeIdx = decodeULEB128(Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
<< " << \", using decoder \" << DecodeIdx << \"\\n\" );\n"
<< " DEBUG(dbgs() << \"----- DECODE SUCCESSFUL -----\\n\");\n"
<< "\n"
<< " MI.setOpcode(Opc);\n"
<< " return decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm);\n"
<< " }\n"
<< " case MCD::OPC_SoftFail: {\n"
<< " // Decode the mask values.\n"
<< " unsigned Len;\n"
<< " InsnType PositiveMask = decodeULEB128(++Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " InsnType NegativeMask = decodeULEB128(Ptr, &Len);\n"
<< " Ptr += Len;\n"
<< " bool Fail = (insn & PositiveMask) || (~insn & NegativeMask);\n"
<< " if (Fail)\n"
<< " S = MCDisassembler::SoftFail;\n"
<< " DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? \"FAIL\\n\":\"PASS\\n\"));\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_Fail: {\n"
<< " DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
<< " return MCDisassembler::Fail;\n"
<< " }\n"
<< " }\n"
<< " }\n"
<< " llvm_unreachable(\"bogosity detected in disassembler state machine!\");\n"
<< "}\n\n";
}
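// The generated decodeInstruction() is a small interpreter over the byte-coded
// table: OPC_ExtractField/OPC_FilterValue drive the tree walk, OPC_CheckField
// and OPC_CheckPredicate guard individual encodings, OPC_SoftFail downgrades
// the status, and OPC_Decode dispatches to decodeToMCInst() with the selected
// decoder index.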
// Emits disassembler code for instruction decoding.
void FixedLenDecoderEmitter::run(raw_ostream &o) {
formatted_raw_ostream OS(o);
OS << "#include \"llvm/MC/MCInst.h\"\n";
OS << "#include \"llvm/Support/Debug.h\"\n";
OS << "#include \"llvm/Support/DataTypes.h\"\n";
OS << "#include \"llvm/Support/LEB128.h\"\n";
OS << "#include \"llvm/Support/raw_ostream.h\"\n";
OS << "#include <assert.h>\n";
OS << '\n';
OS << "namespace llvm {\n\n";
emitFieldFromInstruction(OS);
Target.reverseBitsForLittleEndianEncoding();
// Parameterize the decoders based on namespace and instruction width.
NumberedInstructions = &Target.getInstructionsByEnumValue();
std::map<std::pair<std::string, unsigned>,
std::vector<unsigned> > OpcMap;
std::map<unsigned, std::vector<OperandInfo> > Operands;
for (unsigned i = 0; i < NumberedInstructions->size(); ++i) {
const CodeGenInstruction *Inst = NumberedInstructions->at(i);
const Record *Def = Inst->TheDef;
unsigned Size = Def->getValueAsInt("Size");
if (Def->getValueAsString("Namespace") == "TargetOpcode" ||
Def->getValueAsBit("isPseudo") ||
Def->getValueAsBit("isAsmParserOnly") ||
Def->getValueAsBit("isCodeGenOnly"))
continue;
std::string DecoderNamespace = Def->getValueAsString("DecoderNamespace");
if (Size) {
if (populateInstruction(Target, *Inst, i, Operands)) {
OpcMap[std::make_pair(DecoderNamespace, Size)].push_back(i);
}
}
}
DecoderTableInfo TableInfo;
for (const auto &Opc : OpcMap) {
// Emit the decoder for this namespace+width combination.
FilterChooser FC(*NumberedInstructions, Opc.second, Operands,
8*Opc.first.second, this);
// The decode table is cleared for each top level decoder function. The
// predicates and decoders themselves, however, are shared across all
// decoders to give more opportunities for uniquing.
TableInfo.Table.clear();
TableInfo.FixupStack.clear();
TableInfo.Table.reserve(16384);
TableInfo.FixupStack.emplace_back();
FC.emitTableEntries(TableInfo);
// Any NumToSkip fixups in the top level scope can resolve to the
// OPC_Fail at the end of the table.
assert(TableInfo.FixupStack.size() == 1 && "fixup stack phasing error!");
// Resolve any NumToSkip fixups in the current scope.
resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
TableInfo.Table.size());
TableInfo.FixupStack.clear();
TableInfo.Table.push_back(MCD::OPC_Fail);
// Print the table to the output stream.
emitTable(OS, TableInfo.Table, 0, FC.getBitWidth(), Opc.first.first);
OS.flush();
}
// Emit the predicate function.
emitPredicateFunction(OS, TableInfo.Predicates, 0);
// Emit the decoder function.
emitDecoderFunction(OS, TableInfo.Decoders, 0);
// Emit the main entry point for the decoder, decodeInstruction().
emitDecodeInstruction(OS);
OS << "\n} // End llvm namespace\n";
}
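// The emitted file thus contains, in order: fieldFromInstruction(), one
// decoder table per (namespace, width) pair, the shared predicate and decoder
// helper functions, and finally decodeInstruction().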
namespace llvm {
void EmitFixedLenDecoder(RecordKeeper &RK, raw_ostream &OS,
std::string PredicateNamespace,
std::string GPrefix,
std::string GPostfix,
std::string ROK,
std::string RFail,
std::string L) {
FixedLenDecoderEmitter(RK, PredicateNamespace, GPrefix, GPostfix,
ROK, RFail, L).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/LLVMBuild.txt | ;===- ./utils/TableGen/LLVMBuild.txt ---------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = BuildTool
name = tblgen
parent = BuildTools
required_libraries = Support TableGen
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelMatcherOpt.cpp | //===- DAGISelMatcherOpt.cpp - Optimize a DAG Matcher ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the DAG Matcher optimizer.
//
//===----------------------------------------------------------------------===//
#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "isel-opt"
/// ContractNodes - Turn multiple matcher node patterns like 'MoveChild+Record'
/// into single compound nodes like RecordChild.
static void ContractNodes(std::unique_ptr<Matcher> &MatcherPtr,
const CodeGenDAGPatterns &CGP) {
// If we reached the end of the chain, we're done.
Matcher *N = MatcherPtr.get();
if (!N) return;
// If we have a scope node, walk down all of the children.
if (ScopeMatcher *Scope = dyn_cast<ScopeMatcher>(N)) {
for (unsigned i = 0, e = Scope->getNumChildren(); i != e; ++i) {
std::unique_ptr<Matcher> Child(Scope->takeChild(i));
ContractNodes(Child, CGP);
Scope->resetChild(i, Child.release());
}
return;
}
// If we found a movechild node with a node that comes in a 'foochild' form,
// transform it.
if (MoveChildMatcher *MC = dyn_cast<MoveChildMatcher>(N)) {
Matcher *New = nullptr;
if (RecordMatcher *RM = dyn_cast<RecordMatcher>(MC->getNext()))
if (MC->getChildNo() < 8) // Only have RecordChild0...7
New = new RecordChildMatcher(MC->getChildNo(), RM->getWhatFor(),
RM->getResultNo());
if (CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(MC->getNext()))
if (MC->getChildNo() < 8 && // Only have CheckChildType0...7
CT->getResNo() == 0) // CheckChildType checks res #0
New = new CheckChildTypeMatcher(MC->getChildNo(), CT->getType());
if (CheckSameMatcher *CS = dyn_cast<CheckSameMatcher>(MC->getNext()))
if (MC->getChildNo() < 4) // Only have CheckChildSame0...3
New = new CheckChildSameMatcher(MC->getChildNo(), CS->getMatchNumber());
if (CheckIntegerMatcher *CS = dyn_cast<CheckIntegerMatcher>(MC->getNext()))
if (MC->getChildNo() < 5) // Only have CheckChildInteger0...4
New = new CheckChildIntegerMatcher(MC->getChildNo(), CS->getValue());
if (New) {
// Insert the new node.
New->setNext(MatcherPtr.release());
MatcherPtr.reset(New);
// Remove the old one.
MC->setNext(MC->getNext()->takeNext());
return ContractNodes(MatcherPtr, CGP);
}
}
// Zap movechild -> moveparent.
if (MoveChildMatcher *MC = dyn_cast<MoveChildMatcher>(N))
if (MoveParentMatcher *MP =
dyn_cast<MoveParentMatcher>(MC->getNext())) {
MatcherPtr.reset(MP->takeNext());
return ContractNodes(MatcherPtr, CGP);
}
// Turn EmitNode->MarkGlueResults->CompleteMatch into
// MarkGlueResults->EmitNode->CompleteMatch when we can to encourage
// MorphNodeTo formation. This is safe because MarkGlueResults never refers
// to the root of the pattern.
if (isa<EmitNodeMatcher>(N) && isa<MarkGlueResultsMatcher>(N->getNext()) &&
isa<CompleteMatchMatcher>(N->getNext()->getNext())) {
// Unlink the two nodes from the list.
Matcher *EmitNode = MatcherPtr.release();
Matcher *MFR = EmitNode->takeNext();
Matcher *Tail = MFR->takeNext();
// Relink them.
MatcherPtr.reset(MFR);
MFR->setNext(EmitNode);
EmitNode->setNext(Tail);
return ContractNodes(MatcherPtr, CGP);
}
// Turn EmitNode->CompleteMatch into MorphNodeTo if we can.
if (EmitNodeMatcher *EN = dyn_cast<EmitNodeMatcher>(N))
if (CompleteMatchMatcher *CM =
dyn_cast<CompleteMatchMatcher>(EN->getNext())) {
// We can only use MorphNodeTo if the result values match up.
unsigned RootResultFirst = EN->getFirstResultSlot();
bool ResultsMatch = true;
for (unsigned i = 0, e = CM->getNumResults(); i != e; ++i)
if (CM->getResult(i) != RootResultFirst+i)
ResultsMatch = false;
// If the selected node defines a subset of the glue/chain results, we
// can't use MorphNodeTo. For example, we can't use MorphNodeTo if the
// matched pattern has a chain but the root node doesn't.
const PatternToMatch &Pattern = CM->getPattern();
if (!EN->hasChain() &&
Pattern.getSrcPattern()->NodeHasProperty(SDNPHasChain, CGP))
ResultsMatch = false;
// If the matched node has glue and the output root doesn't, we can't
// use MorphNodeTo.
//
// NOTE: Strictly speaking, we don't have to check for glue here
// because the code in the pattern generator doesn't handle it right. We
// do it anyway for thoroughness.
if (!EN->hasOutFlag() &&
Pattern.getSrcPattern()->NodeHasProperty(SDNPOutGlue, CGP))
ResultsMatch = false;
// If the root result node defines more results than the source root node
// *and* has a chain or glue input, then we can't match it because it
// would end up replacing the extra result with the chain/glue.
#if 0
if ((EN->hasGlue() || EN->hasChain()) &&
EN->getNumNonChainGlueVTs() > ... need to get no results reliably ...)
ResultMatch = false;
#endif
if (ResultsMatch) {
const SmallVectorImpl<MVT::SimpleValueType> &VTs = EN->getVTList();
const SmallVectorImpl<unsigned> &Operands = EN->getOperandList();
MatcherPtr.reset(new MorphNodeToMatcher(EN->getOpcodeName(),
VTs, Operands,
EN->hasChain(), EN->hasInFlag(),
EN->hasOutFlag(),
EN->hasMemRefs(),
EN->getNumFixedArityOperands(),
Pattern));
return;
}
// FIXME2: Kill off all the SelectionDAG::SelectNodeTo and getMachineNode
// variants.
}
ContractNodes(N->getNextPtr(), CGP);
// If we have a CheckType/CheckChildType/Record node followed by a
// CheckOpcode, invert the two nodes. We prefer to do structural checks
// before type checks, as this opens opportunities for factoring on targets
// like X86 where many operations are valid on multiple types.
if ((isa<CheckTypeMatcher>(N) || isa<CheckChildTypeMatcher>(N) ||
isa<RecordMatcher>(N)) &&
isa<CheckOpcodeMatcher>(N->getNext())) {
// Unlink the two nodes from the list.
Matcher *CheckType = MatcherPtr.release();
Matcher *CheckOpcode = CheckType->takeNext();
Matcher *Tail = CheckOpcode->takeNext();
// Relink them.
MatcherPtr.reset(CheckOpcode);
CheckOpcode->setNext(CheckType);
CheckType->setNext(Tail);
return ContractNodes(MatcherPtr, CGP);
}
}
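// For illustration, the contraction above rewrites a chain such as
//   MoveChild 1, Record, MoveParent, ...
// into
//   RecordChild1, ...
// (the Record is folded into RecordChild1 and the now-adjacent
// MoveChild/MoveParent pair is zapped), shrinking the emitted matcher table.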
/// SinkPatternPredicates - Pattern predicates can be checked at any level of
/// the matching tree. The generator dumps them at the top level of the pattern
/// though, which prevents factoring from being able to see past them. This
/// optimization sinks them as far down into the pattern as possible.
///
/// Conceptually, we'd like to sink these predicates all the way to the last
/// matcher predicate in the series. However, it turns out that some
/// ComplexPatterns have side effects on the graph, so we really don't want to
/// run a complex pattern if the pattern predicate will fail. For this
/// reason, we refuse to sink the pattern predicate past a ComplexPattern.
///
static void SinkPatternPredicates(std::unique_ptr<Matcher> &MatcherPtr) {
// Recursively scan for a PatternPredicate.
// If we reached the end of the chain, we're done.
Matcher *N = MatcherPtr.get();
if (!N) return;
// Walk down all members of a scope node.
if (ScopeMatcher *Scope = dyn_cast<ScopeMatcher>(N)) {
for (unsigned i = 0, e = Scope->getNumChildren(); i != e; ++i) {
std::unique_ptr<Matcher> Child(Scope->takeChild(i));
SinkPatternPredicates(Child);
Scope->resetChild(i, Child.release());
}
return;
}
// If this node isn't a CheckPatternPredicateMatcher we keep scanning until
// we find one.
CheckPatternPredicateMatcher *CPPM =dyn_cast<CheckPatternPredicateMatcher>(N);
if (!CPPM)
return SinkPatternPredicates(N->getNextPtr());
// Ok, we found one, let's try to sink it. Check if we can sink it past the
// next node in the chain. If not, we won't be able to change anything and
// might as well bail.
if (!CPPM->getNext()->isSafeToReorderWithPatternPredicate())
return;
// Okay, we know we can sink it past at least one node. Unlink it from the
// chain and scan for the new insertion point.
MatcherPtr.release(); // Don't delete CPPM.
MatcherPtr.reset(CPPM->takeNext());
N = MatcherPtr.get();
while (N->getNext()->isSafeToReorderWithPatternPredicate())
N = N->getNext();
// At this point, we want to insert CPPM after N.
CPPM->setNext(N->takeNext());
N->setNext(CPPM);
}
/// FindNodeWithKind - Scan a series of matchers looking for a matcher with a
/// specified kind. Return null if we didn't find one otherwise return the
/// matcher.
static Matcher *FindNodeWithKind(Matcher *M, Matcher::KindTy Kind) {
for (; M; M = M->getNext())
if (M->getKind() == Kind)
return M;
return nullptr;
}
/// FactorNodes - Turn matches like this:
/// Scope
/// OPC_CheckType i32
/// ABC
/// OPC_CheckType i32
/// XYZ
/// into:
/// OPC_CheckType i32
/// Scope
/// ABC
/// XYZ
///
static void FactorNodes(std::unique_ptr<Matcher> &MatcherPtr) {
// If we reached the end of the chain, we're done.
Matcher *N = MatcherPtr.get();
if (!N) return;
// If this is not a push node, just scan for one.
ScopeMatcher *Scope = dyn_cast<ScopeMatcher>(N);
if (!Scope)
return FactorNodes(N->getNextPtr());
// Okay, pull together the children of the scope node into a vector so we can
// inspect it more easily. While we're at it, bucket them up by the hash
// code of their first predicate.
SmallVector<Matcher*, 32> OptionsToMatch;
for (unsigned i = 0, e = Scope->getNumChildren(); i != e; ++i) {
// Factor the subexpression.
std::unique_ptr<Matcher> Child(Scope->takeChild(i));
FactorNodes(Child);
if (Matcher *N = Child.release())
OptionsToMatch.push_back(N);
}
SmallVector<Matcher*, 32> NewOptionsToMatch;
// Loop over options to match, merging neighboring patterns with identical
// starting nodes into a shared matcher.
for (unsigned OptionIdx = 0, e = OptionsToMatch.size(); OptionIdx != e;) {
// Find the set of matchers that start with this node.
Matcher *Optn = OptionsToMatch[OptionIdx++];
if (OptionIdx == e) {
NewOptionsToMatch.push_back(Optn);
continue;
}
// See if the next option starts with the same matcher. If the two
// neighbors *do* start with the same matcher, we can factor the matcher out
// of at least these two patterns. See what the maximal set we can merge
// together is.
SmallVector<Matcher*, 8> EqualMatchers;
EqualMatchers.push_back(Optn);
// Factor all of the known-equal matchers after this one into the same
// group.
while (OptionIdx != e && OptionsToMatch[OptionIdx]->isEqual(Optn))
EqualMatchers.push_back(OptionsToMatch[OptionIdx++]);
// If we found a non-equal matcher, see if it is contradictory with the
// current node. If so, we know that the ordering relation between the
// current sets of nodes and this node don't matter. Look past it to see if
// we can merge anything else into this matching group.
unsigned Scan = OptionIdx;
while (1) {
// If we ran out of stuff to scan, we're done.
if (Scan == e) break;
Matcher *ScanMatcher = OptionsToMatch[Scan];
// If we found an entry that matches our matcher, merge it into the set to
// handle.
if (Optn->isEqual(ScanMatcher)) {
// If it is equal after all, add the option to EqualMatchers and remove it
// from OptionsToMatch.
EqualMatchers.push_back(ScanMatcher);
OptionsToMatch.erase(OptionsToMatch.begin()+Scan);
--e;
continue;
}
// If the option we're checking for contradicts the start of the list,
// skip over it.
if (Optn->isContradictory(ScanMatcher)) {
++Scan;
continue;
}
// If we're scanning for a simple node, see if it occurs later in the
// sequence. If so, and if we can move it up, it might be contradictory
// or the same as what we're looking for. If so, reorder it.
if (Optn->isSimplePredicateOrRecordNode()) {
Matcher *M2 = FindNodeWithKind(ScanMatcher, Optn->getKind());
if (M2 && M2 != ScanMatcher &&
M2->canMoveBefore(ScanMatcher) &&
(M2->isEqual(Optn) || M2->isContradictory(Optn))) {
Matcher *MatcherWithoutM2 = ScanMatcher->unlinkNode(M2);
M2->setNext(MatcherWithoutM2);
OptionsToMatch[Scan] = M2;
continue;
}
}
// Otherwise, we don't know how to handle this entry, we have to bail.
break;
}
if (Scan != e &&
// Don't print if it's obvious nothing extra could be merged anyway.
Scan+1 != e) {
DEBUG(errs() << "Couldn't merge this:\n";
Optn->print(errs(), 4);
errs() << "into this:\n";
OptionsToMatch[Scan]->print(errs(), 4);
if (Scan+1 != e)
OptionsToMatch[Scan+1]->printOne(errs());
if (Scan+2 < e)
OptionsToMatch[Scan+2]->printOne(errs());
errs() << "\n");
}
// If we only found one option starting with this matcher, no factoring is
// possible.
if (EqualMatchers.size() == 1) {
NewOptionsToMatch.push_back(EqualMatchers[0]);
continue;
}
// Factor these checks by pulling the first node off each entry and
// discarding it. Take the first one off the first entry to reuse.
Matcher *Shared = Optn;
Optn = Optn->takeNext();
EqualMatchers[0] = Optn;
// Remove and delete the first node from the other matchers we're factoring.
for (unsigned i = 1, e = EqualMatchers.size(); i != e; ++i) {
Matcher *Tmp = EqualMatchers[i]->takeNext();
delete EqualMatchers[i];
EqualMatchers[i] = Tmp;
}
Shared->setNext(new ScopeMatcher(EqualMatchers));
// Recursively factor the newly created node.
FactorNodes(Shared->getNextPtr());
NewOptionsToMatch.push_back(Shared);
}
// If we're down to a single pattern to match, then we don't need this scope
// anymore.
if (NewOptionsToMatch.size() == 1) {
MatcherPtr.reset(NewOptionsToMatch[0]);
return;
}
if (NewOptionsToMatch.empty()) {
MatcherPtr.reset();
return;
}
// If our factoring failed (didn't achieve anything), see if we can simplify in
// other ways.
// Check to see if all of the leading entries are now opcode checks. If so,
// we can convert this Scope to be a OpcodeSwitch instead.
bool AllOpcodeChecks = true, AllTypeChecks = true;
for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
// Check to see if this breaks a series of CheckOpcodeMatchers.
if (AllOpcodeChecks &&
!isa<CheckOpcodeMatcher>(NewOptionsToMatch[i])) {
#if 0
if (i > 3) {
errs() << "FAILING OPC #" << i << "\n";
NewOptionsToMatch[i]->dump();
}
#endif
AllOpcodeChecks = false;
}
// Check to see if this breaks a series of CheckTypeMatchers.
if (AllTypeChecks) {
CheckTypeMatcher *CTM =
cast_or_null<CheckTypeMatcher>(FindNodeWithKind(NewOptionsToMatch[i],
Matcher::CheckType));
if (!CTM ||
// iPTR checks could alias any other case without us knowing, don't
// bother with them.
CTM->getType() == MVT::iPTR ||
// SwitchType only works for result #0.
CTM->getResNo() != 0 ||
// If the CheckType isn't at the start of the list, see if we can move
// it there.
!CTM->canMoveBefore(NewOptionsToMatch[i])) {
#if 0
if (i > 3 && AllTypeChecks) {
errs() << "FAILING TYPE #" << i << "\n";
NewOptionsToMatch[i]->dump();
}
#endif
AllTypeChecks = false;
}
}
}
// If all the options are CheckOpcode's, we can form the SwitchOpcode, woot.
if (AllOpcodeChecks) {
StringSet<> Opcodes;
SmallVector<std::pair<const SDNodeInfo*, Matcher*>, 8> Cases;
for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
CheckOpcodeMatcher *COM = cast<CheckOpcodeMatcher>(NewOptionsToMatch[i]);
assert(Opcodes.insert(COM->getOpcode().getEnumName()).second &&
"Duplicate opcodes not factored?");
Cases.push_back(std::make_pair(&COM->getOpcode(), COM->getNext()));
}
MatcherPtr.reset(new SwitchOpcodeMatcher(Cases));
return;
}
// If all the options are CheckType's, we can form the SwitchType, woot.
if (AllTypeChecks) {
DenseMap<unsigned, unsigned> TypeEntry;
SmallVector<std::pair<MVT::SimpleValueType, Matcher*>, 8> Cases;
for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
CheckTypeMatcher *CTM =
cast_or_null<CheckTypeMatcher>(FindNodeWithKind(NewOptionsToMatch[i],
Matcher::CheckType));
Matcher *MatcherWithoutCTM = NewOptionsToMatch[i]->unlinkNode(CTM);
MVT::SimpleValueType CTMTy = CTM->getType();
delete CTM;
unsigned &Entry = TypeEntry[CTMTy];
if (Entry != 0) {
// If we have unfactored duplicate types, then we should factor them.
Matcher *PrevMatcher = Cases[Entry-1].second;
if (ScopeMatcher *SM = dyn_cast<ScopeMatcher>(PrevMatcher)) {
SM->setNumChildren(SM->getNumChildren()+1);
SM->resetChild(SM->getNumChildren()-1, MatcherWithoutCTM);
continue;
}
Matcher *Entries[2] = { PrevMatcher, MatcherWithoutCTM };
Cases[Entry-1].second = new ScopeMatcher(Entries);
continue;
}
Entry = Cases.size()+1;
Cases.push_back(std::make_pair(CTMTy, MatcherWithoutCTM));
}
if (Cases.size() != 1) {
MatcherPtr.reset(new SwitchTypeMatcher(Cases));
} else {
// If we factored and ended up with one case, create it now.
MatcherPtr.reset(new CheckTypeMatcher(Cases[0].first, 0));
MatcherPtr->setNext(Cases[0].second);
}
return;
}
// Reassemble the Scope node with the adjusted children.
Scope->setNumChildren(NewOptionsToMatch.size());
for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i)
Scope->resetChild(i, NewOptionsToMatch[i]);
}
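// For illustration (opcode names are hypothetical), a Scope whose children all
// start with an opcode check, e.g.
//   Scope { CheckOpcode ISD::ADD -> A, CheckOpcode ISD::ADD -> B,
//           CheckOpcode ISD::SUB -> C }
// is factored into
//   SwitchOpcode { ISD::ADD -> Scope { A, B }, ISD::SUB -> C }.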
void
llvm::OptimizeMatcher(std::unique_ptr<Matcher> &MatcherPtr,
const CodeGenDAGPatterns &CGP) {
ContractNodes(MatcherPtr, CGP);
SinkPatternPredicates(MatcherPtr);
FactorNodes(MatcherPtr);
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86ModRMFilters.cpp | //===- X86ModRMFilters.cpp - Disassembler ModR/M filterss -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "X86ModRMFilters.h"
using namespace llvm::X86Disassembler;
void ModRMFilter::anchor() { }
void DumbFilter::anchor() { }
void ModFilter::anchor() { }
void ExtendedFilter::anchor() { }
void ExactFilter::anchor() { }
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/module.modulemap | module TableGen {
umbrella "."
module * { export * }
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/InstrInfoEmitter.cpp | //===- InstrInfoEmitter.cpp - Generate a Instruction Set Desc. ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting a description of the target
// instruction set for the code generator.
//
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "TableGenBackends.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>
using namespace llvm;
namespace {
class InstrInfoEmitter {
RecordKeeper &Records;
CodeGenDAGPatterns CDP;
const CodeGenSchedModels &SchedModels;
public:
InstrInfoEmitter(RecordKeeper &R):
Records(R), CDP(R), SchedModels(CDP.getTargetInfo().getSchedModels()) {}
// run - Output the instruction set description.
void run(raw_ostream &OS);
private:
void emitEnums(raw_ostream &OS);
typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
/// The keys of this map are maps which have OpName enum values as their keys
/// and instruction operand indices as their values. The values of this map
/// are lists of instruction names.
typedef std::map<std::map<unsigned, unsigned>,
std::vector<std::string> > OpNameMapTy;
typedef std::map<std::string, unsigned>::iterator StrUintMapIter;
void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
Record *InstrInfo,
std::map<std::vector<Record*>, unsigned> &EL,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS);
void emitOperandTypesEnum(raw_ostream &OS, const CodeGenTarget &Target);
void initOperandMapData(
const std::vector<const CodeGenInstruction *> &NumberedInstructions,
const std::string &Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap);
void emitOperandNameMappings(raw_ostream &OS, const CodeGenTarget &Target,
const std::vector<const CodeGenInstruction*> &NumberedInstructions);
// Operand information.
void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
};
} // End anonymous namespace
static void PrintDefList(const std::vector<Record*> &Uses,
unsigned Num, raw_ostream &OS) {
OS << "static const uint16_t ImplicitList" << Num << "[] = { ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
OS << getQualifiedName(Uses[i]) << ", ";
OS << "0 };\n";
}
//===----------------------------------------------------------------------===//
// Operand Info Emission.
//===----------------------------------------------------------------------===//
std::vector<std::string>
InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
std::vector<std::string> Result;
for (auto &Op : Inst.Operands) {
// Handle aggregate operands and normal operands the same way by expanding
// either case into a list of operands for this op.
std::vector<CGIOperandList::OperandInfo> OperandList;
// This might be an aggregate operand with multiple sub-operands; targets
// like X86 have registers in their multi-operand operands. It may also be
// an anonymous operand, which has a single operand but no declared class
// for it.
DagInit *MIOI = Op.MIOperandInfo;
if (!MIOI || MIOI->getNumArgs() == 0) {
// Single, anonymous, operand.
OperandList.push_back(Op);
} else {
for (unsigned j = 0, e = Op.MINumOperands; j != e; ++j) {
OperandList.push_back(Op);
Record *OpR = cast<DefInit>(MIOI->getArg(j))->getDef();
OperandList.back().Rec = OpR;
}
}
for (unsigned j = 0, e = OperandList.size(); j != e; ++j) {
Record *OpR = OperandList[j].Rec;
std::string Res;
if (OpR->isSubClassOf("RegisterOperand"))
OpR = OpR->getValueAsDef("RegClass");
if (OpR->isSubClassOf("RegisterClass"))
Res += getQualifiedName(OpR) + "RegClassID, ";
else if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += utostr(OpR->getValueAsInt("RegClassKind")) + ", ";
else
// -1 means the operand does not have a fixed register class.
Res += "-1, ";
// Fill in applicable flags.
Res += "0";
// Ptr value whose register class is resolved via callback.
if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += "|(1<<MCOI::LookupPtrRegClass)";
// Predicate operands. Check to see if the original unexpanded operand
// was of type PredicateOp.
if (Op.Rec->isSubClassOf("PredicateOp"))
Res += "|(1<<MCOI::Predicate)";
// Optional def operands. Check to see if the original unexpanded operand
// was of type OptionalDefOperand.
if (Op.Rec->isSubClassOf("OptionalDefOperand"))
Res += "|(1<<MCOI::OptionalDef)";
// Fill in operand type.
Res += ", ";
assert(!Op.OperandType.empty() && "Invalid operand type.");
Res += Op.OperandType;
// Fill in constraint info.
Res += ", ";
const CGIOperandList::ConstraintInfo &Constraint =
Op.Constraints[j];
if (Constraint.isNone())
Res += "0";
else if (Constraint.isEarlyClobber())
Res += "(1 << MCOI::EARLY_CLOBBER)";
else {
assert(Constraint.isTied());
Res += "((" + utostr(Constraint.getTiedOperand()) +
" << 16) | (1 << MCOI::TIED_TO))";
}
Result.push_back(Res);
}
}
return Result;
}
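// A hedged sketch of one entry produced by GetOperandInfo (hypothetical
// target namespace "XX"): a plain GR32 register operand with no constraints
// becomes the string
//   "XX::GR32RegClassID, 0, MCOI::OPERAND_REGISTER, 0"
// which EmitOperandInfo below wraps in braces as an MCOperandInfo
// initializer.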
void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
OperandInfoMapTy &OperandInfoIDs) {
// ID #0 is for no operand info.
unsigned OperandListNum = 0;
OperandInfoIDs[std::vector<std::string>()] = ++OperandListNum;
OS << "\n";
const CodeGenTarget &Target = CDP.getTargetInfo();
for (const CodeGenInstruction *Inst : Target.instructions()) {
std::vector<std::string> OperandInfo = GetOperandInfo(*Inst);
unsigned &N = OperandInfoIDs[OperandInfo];
if (N != 0) continue;
N = ++OperandListNum;
OS << "static const MCOperandInfo OperandInfo" << N << "[] = { ";
for (const std::string &Info : OperandInfo)
OS << "{ " << Info << " }, ";
OS << "};\n";
}
}
/// Initialize data structures for generating operand name mappings.
///
/// \param Operands [out] A map used to generate the OpName enum with operand
/// names as its keys and operand enum values as its values.
/// \param OperandMap [out] A map for representing the operand name mappings for
/// each instruction. This is used to generate the OperandMap table as
/// well as the getNamedOperandIdx() function.
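/// For illustration only (hypothetical instruction): if "XX::FOO" uses the
/// named operand table and has operands $dst (MI operand 0) and $src
/// (MI operand 1), then Operands ends up as {"dst" -> 0, "src" -> 1} and
/// OperandMap maps the list {0 -> 0, 1 -> 1} to ["XX::FOO"].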
void InstrInfoEmitter::initOperandMapData(
const std::vector<const CodeGenInstruction *> &NumberedInstructions,
const std::string &Namespace,
std::map<std::string, unsigned> &Operands,
OpNameMapTy &OperandMap) {
unsigned NumOperands = 0;
for (const CodeGenInstruction *Inst : NumberedInstructions) {
if (!Inst->TheDef->getValueAsBit("UseNamedOperandTable"))
continue;
std::map<unsigned, unsigned> OpList;
for (const auto &Info : Inst->Operands) {
StrUintMapIter I = Operands.find(Info.Name);
if (I == Operands.end()) {
I = Operands.insert(Operands.begin(),
std::pair<std::string, unsigned>(Info.Name, NumOperands++));
}
OpList[I->second] = Info.MIOperandNo;
}
OperandMap[OpList].push_back(Namespace + "::" + Inst->TheDef->getName());
}
}
/// Generate a table and function for looking up the indices of operands by
/// name.
///
/// This code generates:
/// - An enum in the llvm::TargetNamespace::OpName namespace, with one entry
/// for each operand name.
/// - A 2-dimensional table called OperandMap for mapping OpName enum values to
/// operand indices.
/// - A function called getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
/// for looking up the operand index for an instruction, given a value from
/// the OpName enum.
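/// A rough sketch of the output for a hypothetical target "XX" with named
/// operands dst and src (assumed names, not from this file):
///   namespace XX { namespace OpName { enum { dst = 0, src = 1, OPERAND_LAST }; } }
///   int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {
///     static const int16_t OperandMap[][2] = { {0, 1, }, };
///     switch (Opcode) {
///     case XX::FOO: return OperandMap[0][NamedIdx];
///     default: return -1;
///     }
///   }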
void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
const CodeGenTarget &Target,
const std::vector<const CodeGenInstruction*> &NumberedInstructions) {
const std::string &Namespace = Target.getInstNamespace();
std::string OpNameNS = "OpName";
// Map of operand names to their enumeration value. This will be used to
// generate the OpName enum.
std::map<std::string, unsigned> Operands;
OpNameMapTy OperandMap;
initOperandMapData(NumberedInstructions, Namespace, Operands, OperandMap);
OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n";
OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n";
OS << "namespace llvm {\n";
OS << "namespace " << Namespace << " {\n";
OS << "namespace " << OpNameNS << " { \n";
OS << "enum {\n";
for (const auto &Op : Operands)
OS << " " << Op.first << " = " << Op.second << ",\n";
OS << "OPERAND_LAST";
OS << "\n};\n";
OS << "} // End namespace OpName\n";
OS << "} // End namespace " << Namespace << "\n";
OS << "} // End namespace llvm\n";
OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n";
OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n";
OS << "#undef GET_INSTRINFO_NAMED_OPS\n";
OS << "namespace llvm {\n";
OS << "namespace " << Namespace << " {\n";
OS << "LLVM_READONLY\n";
OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n";
if (!Operands.empty()) {
OS << " static const int16_t OperandMap [][" << Operands.size()
<< "] = {\n";
for (const auto &Entry : OperandMap) {
const std::map<unsigned, unsigned> &OpList = Entry.first;
OS << "{";
// Emit a row of the OperandMap table
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
OS << (OpList.count(i) == 0 ? -1 : (int)OpList.find(i)->second) << ", ";
OS << "},\n";
}
OS << "};\n";
OS << " switch(Opcode) {\n";
unsigned TableIndex = 0;
for (const auto &Entry : OperandMap) {
for (const std::string &Name : Entry.second)
OS << " case " << Name << ":\n";
OS << " return OperandMap[" << TableIndex++ << "][NamedIdx];\n";
}
OS << " default: return -1;\n";
OS << " }\n";
} else {
// There are no operands, so no need to emit anything
OS << " return -1;\n";
}
OS << "}\n";
OS << "} // End namespace " << Namespace << "\n";
OS << "} // End namespace llvm\n";
OS << "#endif //GET_INSTRINFO_NAMED_OPS\n";
}
/// Generate an enum for all the operand types for this target, under the
/// llvm::TargetNamespace::OpTypes namespace.
/// Operand types are all definitions derived from the Operand Target.td class.
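/// A rough sketch of the output for a hypothetical target "XX" defining
/// Operand records i32imm and brtarget (assumed names):
///   namespace XX { namespace OpTypes {
///     enum OperandType { i32imm = 0, brtarget = 1, OPERAND_TYPE_LIST_END };
///   } }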
void InstrInfoEmitter::emitOperandTypesEnum(raw_ostream &OS,
const CodeGenTarget &Target) {
const std::string &Namespace = Target.getInstNamespace();
std::vector<Record *> Operands = Records.getAllDerivedDefinitions("Operand");
OS << "\n#ifdef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
OS << "#undef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
OS << "namespace llvm {\n";
OS << "namespace " << Namespace << " {\n";
OS << "namespace OpTypes { \n";
OS << "enum OperandType {\n";
unsigned EnumVal = 0;
for (const Record *Op : Operands) {
if (!Op->isAnonymous())
OS << " " << Op->getName() << " = " << EnumVal << ",\n";
++EnumVal;
}
OS << " OPERAND_TYPE_LIST_END" << "\n};\n";
OS << "} // End namespace OpTypes\n";
OS << "} // End namespace " << Namespace << "\n";
OS << "} // End namespace llvm\n";
OS << "#endif // GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
}
//===----------------------------------------------------------------------===//
// Main Output.
//===----------------------------------------------------------------------===//
// run - Emit the main instruction description records for the target...
void InstrInfoEmitter::run(raw_ostream &OS) {
emitSourceFileHeader("Target Instruction Enum Values", OS);
emitEnums(OS);
emitSourceFileHeader("Target Instruction Descriptors", OS);
OS << "\n#ifdef GET_INSTRINFO_MC_DESC\n";
OS << "#undef GET_INSTRINFO_MC_DESC\n";
OS << "namespace llvm {\n\n";
CodeGenTarget &Target = CDP.getTargetInfo();
const std::string &TargetName = Target.getName();
Record *InstrInfo = Target.getInstructionSet();
// Keep track of all of the def lists we have emitted already.
std::map<std::vector<Record*>, unsigned> EmittedLists;
unsigned ListNumber = 0;
// Emit all of the instruction's implicit uses and defs.
for (const CodeGenInstruction *II : Target.instructions()) {
Record *Inst = II->TheDef;
std::vector<Record*> Uses = Inst->getValueAsListOfDefs("Uses");
if (!Uses.empty()) {
unsigned &IL = EmittedLists[Uses];
if (!IL) PrintDefList(Uses, IL = ++ListNumber, OS);
}
std::vector<Record*> Defs = Inst->getValueAsListOfDefs("Defs");
if (!Defs.empty()) {
unsigned &IL = EmittedLists[Defs];
if (!IL) PrintDefList(Defs, IL = ++ListNumber, OS);
}
}
OperandInfoMapTy OperandInfoIDs;
// Emit all of the operand info records.
EmitOperandInfo(OS, OperandInfoIDs);
// Emit all of the MCInstrDesc records in their ENUM ordering.
//
OS << "\nextern const MCInstrDesc " << TargetName << "Insts[] = {\n";
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
SequenceToOffsetTable<std::string> InstrNames;
unsigned Num = 0;
for (const CodeGenInstruction *Inst : NumberedInstructions) {
// Keep a list of the instruction names.
InstrNames.add(Inst->TheDef->getName());
// Emit the record into the table.
emitRecord(*Inst, Num++, InstrInfo, EmittedLists, OperandInfoIDs, OS);
}
OS << "};\n\n";
// Emit the array of instruction names.
InstrNames.layout();
OS << "extern const char " << TargetName << "InstrNameData[] = {\n";
InstrNames.emit(OS, printChar);
OS << "};\n\n";
OS << "extern const unsigned " << TargetName <<"InstrNameIndices[] = {";
Num = 0;
for (const CodeGenInstruction *Inst : NumberedInstructions) {
// Newline every eight entries.
if (Num % 8 == 0)
OS << "\n ";
OS << InstrNames.get(Inst->TheDef->getName()) << "U, ";
++Num;
}
OS << "\n};\n\n";
// MCInstrInfo initialization routine.
OS << "static inline void Init" << TargetName
<< "MCInstrInfo(MCInstrInfo *II) {\n";
OS << " II->InitMCInstrInfo(" << TargetName << "Insts, "
<< TargetName << "InstrNameIndices, " << TargetName << "InstrNameData, "
<< NumberedInstructions.size() << ");\n}\n\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_INSTRINFO_MC_DESC\n\n";
// Create a TargetInstrInfo subclass to hide the MC layer initialization.
OS << "\n#ifdef GET_INSTRINFO_HEADER\n";
OS << "#undef GET_INSTRINFO_HEADER\n";
std::string ClassName = TargetName + "GenInstrInfo";
OS << "namespace llvm {\n";
OS << "struct " << ClassName << " : public TargetInstrInfo {\n"
<< " explicit " << ClassName
<< "(int CFSetupOpcode = -1, int CFDestroyOpcode = -1);\n"
<< " virtual ~" << ClassName << "();\n"
<< "};\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_INSTRINFO_HEADER\n\n";
OS << "\n#ifdef GET_INSTRINFO_CTOR_DTOR\n";
OS << "#undef GET_INSTRINFO_CTOR_DTOR\n";
OS << "namespace llvm {\n";
OS << "extern const MCInstrDesc " << TargetName << "Insts[];\n";
OS << "extern const unsigned " << TargetName << "InstrNameIndices[];\n";
OS << "extern const char " << TargetName << "InstrNameData[];\n";
OS << ClassName << "::" << ClassName
<< "(int CFSetupOpcode, int CFDestroyOpcode)\n"
<< " : TargetInstrInfo(CFSetupOpcode, CFDestroyOpcode) {\n"
<< " InitMCInstrInfo(" << TargetName << "Insts, " << TargetName
<< "InstrNameIndices, " << TargetName << "InstrNameData, "
<< NumberedInstructions.size() << ");\n}\n"
<< ClassName << "::~" << ClassName << "() {}\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_INSTRINFO_CTOR_DTOR\n\n";
emitOperandNameMappings(OS, Target, NumberedInstructions);
emitOperandTypesEnum(OS, Target);
}
void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
Record *InstrInfo,
std::map<std::vector<Record*>, unsigned> &EmittedLists,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS) {
int MinOperands = 0;
if (!Inst.Operands.empty())
// Each logical operand can be multiple MI operands.
MinOperands = Inst.Operands.back().MIOperandNo +
Inst.Operands.back().MINumOperands;
OS << " { ";
OS << Num << ",\t" << MinOperands << ",\t"
<< Inst.Operands.NumDefs << ",\t"
<< Inst.TheDef->getValueAsInt("Size") << ",\t"
<< SchedModels.getSchedClassIdx(Inst) << ",\t0";
// Emit all of the target independent flags...
if (Inst.isPseudo) OS << "|(1ULL<<MCID::Pseudo)";
if (Inst.isReturn) OS << "|(1ULL<<MCID::Return)";
if (Inst.isBranch) OS << "|(1ULL<<MCID::Branch)";
if (Inst.isIndirectBranch) OS << "|(1ULL<<MCID::IndirectBranch)";
if (Inst.isCompare) OS << "|(1ULL<<MCID::Compare)";
if (Inst.isMoveImm) OS << "|(1ULL<<MCID::MoveImm)";
if (Inst.isBitcast) OS << "|(1ULL<<MCID::Bitcast)";
if (Inst.isSelect) OS << "|(1ULL<<MCID::Select)";
if (Inst.isBarrier) OS << "|(1ULL<<MCID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1ULL<<MCID::DelaySlot)";
if (Inst.isCall) OS << "|(1ULL<<MCID::Call)";
if (Inst.canFoldAsLoad) OS << "|(1ULL<<MCID::FoldableAsLoad)";
if (Inst.mayLoad) OS << "|(1ULL<<MCID::MayLoad)";
if (Inst.mayStore) OS << "|(1ULL<<MCID::MayStore)";
if (Inst.isPredicable) OS << "|(1ULL<<MCID::Predicable)";
if (Inst.isConvertibleToThreeAddress) OS << "|(1ULL<<MCID::ConvertibleTo3Addr)";
if (Inst.isCommutable) OS << "|(1ULL<<MCID::Commutable)";
if (Inst.isTerminator) OS << "|(1ULL<<MCID::Terminator)";
if (Inst.isReMaterializable) OS << "|(1ULL<<MCID::Rematerializable)";
if (Inst.isNotDuplicable) OS << "|(1ULL<<MCID::NotDuplicable)";
if (Inst.Operands.hasOptionalDef) OS << "|(1ULL<<MCID::HasOptionalDef)";
if (Inst.usesCustomInserter) OS << "|(1ULL<<MCID::UsesCustomInserter)";
if (Inst.hasPostISelHook) OS << "|(1ULL<<MCID::HasPostISelHook)";
if (Inst.Operands.isVariadic)OS << "|(1ULL<<MCID::Variadic)";
if (Inst.hasSideEffects) OS << "|(1ULL<<MCID::UnmodeledSideEffects)";
if (Inst.isAsCheapAsAMove) OS << "|(1ULL<<MCID::CheapAsAMove)";
if (Inst.hasExtraSrcRegAllocReq) OS << "|(1ULL<<MCID::ExtraSrcRegAllocReq)";
if (Inst.hasExtraDefRegAllocReq) OS << "|(1ULL<<MCID::ExtraDefRegAllocReq)";
if (Inst.isRegSequence) OS << "|(1ULL<<MCID::RegSequence)";
if (Inst.isExtractSubreg) OS << "|(1ULL<<MCID::ExtractSubreg)";
if (Inst.isInsertSubreg) OS << "|(1ULL<<MCID::InsertSubreg)";
if (Inst.isConvergent) OS << "|(1ULL<<MCID::Convergent)";
// Emit all of the target-specific flags...
BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
if (!TSF)
PrintFatalError("no TSFlags?");
uint64_t Value = 0;
for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) {
if (BitInit *Bit = dyn_cast<BitInit>(TSF->getBit(i)))
Value |= uint64_t(Bit->getValue()) << i;
else
PrintFatalError("Invalid TSFlags bit in " + Inst.TheDef->getName());
}
OS << ", 0x";
OS.write_hex(Value);
OS << "ULL, ";
// Emit the implicit uses and defs lists...
std::vector<Record*> UseList = Inst.TheDef->getValueAsListOfDefs("Uses");
if (UseList.empty())
OS << "nullptr, ";
else
OS << "ImplicitList" << EmittedLists[UseList] << ", ";
std::vector<Record*> DefList = Inst.TheDef->getValueAsListOfDefs("Defs");
if (DefList.empty())
OS << "nullptr, ";
else
OS << "ImplicitList" << EmittedLists[DefList] << ", ";
// Emit the operand info.
std::vector<std::string> OperandInfo = GetOperandInfo(Inst);
if (OperandInfo.empty())
OS << "nullptr";
else
OS << "OperandInfo" << OpInfo.find(OperandInfo)->second;
CodeGenTarget &Target = CDP.getTargetInfo();
if (Inst.HasComplexDeprecationPredicate)
// Emit a function pointer to the complex predicate method.
OS << ", -1 "
<< ",&get" << Inst.DeprecatedReason << "DeprecationInfo";
else if (!Inst.DeprecatedReason.empty())
// Emit the Subtarget feature.
OS << ", " << Target.getInstNamespace() << "::" << Inst.DeprecatedReason
<< " ,nullptr";
else
// Instruction isn't deprecated.
OS << ", -1 ,nullptr";
OS << " }, // Inst #" << Num << " = " << Inst.TheDef->getName() << "\n";
}
// emitEnums - Print out enum values for all of the instructions.
void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
OS << "\n#ifdef GET_INSTRINFO_ENUM\n";
OS << "#undef GET_INSTRINFO_ENUM\n";
OS << "namespace llvm {\n\n";
CodeGenTarget Target(Records);
// We must emit the PHI opcode first...
std::string Namespace = Target.getInstNamespace();
if (Namespace.empty())
PrintFatalError("No instructions defined!");
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
OS << "namespace " << Namespace << " {\n";
OS << " enum {\n";
unsigned Num = 0;
for (const CodeGenInstruction *Inst : NumberedInstructions)
OS << " " << Inst->TheDef->getName() << "\t= " << Num++ << ",\n";
OS << " INSTRUCTION_LIST_END = " << NumberedInstructions.size() << "\n";
OS << " };\n\n";
OS << "namespace Sched {\n";
OS << " enum {\n";
Num = 0;
for (const auto &Class : SchedModels.explicit_classes())
OS << " " << Class.Name << "\t= " << Num++ << ",\n";
OS << " SCHED_LIST_END = " << SchedModels.numInstrSchedClasses() << "\n";
OS << " };\n";
OS << "} // End Sched namespace\n";
OS << "} // End " << Namespace << " namespace\n";
OS << "} // End llvm namespace \n";
OS << "#endif // GET_INSTRINFO_ENUM\n\n";
}
namespace llvm {
void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) {
InstrInfoEmitter(RK).run(OS);
EmitMapTable(RK, OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/AsmWriterInst.cpp | //===- AsmWriterInst.cpp - Classes encapsulating a printable inst ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes implement a parser for assembly strings.
//
//===----------------------------------------------------------------------===//
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
static bool isIdentChar(char C) {
return (C >= 'a' && C <= 'z') ||
(C >= 'A' && C <= 'Z') ||
(C >= '0' && C <= '9') ||
C == '_';
}
std::string AsmWriterOperand::getCode() const {
if (OperandType == isLiteralTextOperand) {
if (Str.size() == 1)
return "O << '" + Str + "'; ";
return "O << \"" + Str + "\"; ";
}
if (OperandType == isLiteralStatementOperand)
return Str;
std::string Result = Str + "(MI";
if (MIOpNo != ~0U)
Result += ", " + utostr(MIOpNo);
if (PassSubtarget)
Result += ", STI";
Result += ", O";
if (!MiModifier.empty())
Result += ", \"" + MiModifier + '"';
return Result + "); ";
}
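// Hedged examples of the strings built above (hypothetical values): a
// machine-operand entry with Str == "printOperand", MIOpNo == 1,
// PassSubtarget != 0 and MiModifier == "mod" yields
//   printOperand(MI, 1, STI, O, "mod");
// while a single-character literal-text entry for "," yields
//   O << ',';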
/// ParseAsmString - Parse the specified Instruction's AsmString into this
/// AsmWriterInst.
///
AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI, unsigned Variant,
unsigned PassSubtarget) {
this->CGI = &CGI;
// NOTE: Any extensions to this code need to be mirrored in the
// AsmPrinter::printInlineAsm code that executes as compile time (assuming
// that inline asm strings should also get the new feature)!
std::string AsmString = CGI.FlattenAsmStringVariants(CGI.AsmString, Variant);
std::string::size_type LastEmitted = 0;
while (LastEmitted != AsmString.size()) {
std::string::size_type DollarPos =
AsmString.find_first_of("$\\", LastEmitted);
if (DollarPos == std::string::npos) DollarPos = AsmString.size();
// Emit a constant string fragment.
if (DollarPos != LastEmitted) {
for (; LastEmitted != DollarPos; ++LastEmitted)
switch (AsmString[LastEmitted]) {
case '\n':
AddLiteralString("\\n");
break;
case '\t':
AddLiteralString("\\t");
break;
case '"':
AddLiteralString("\\\"");
break;
case '\\':
AddLiteralString("\\\\");
break;
default:
AddLiteralString(std::string(1, AsmString[LastEmitted]));
break;
}
} else if (AsmString[DollarPos] == '\\') {
if (DollarPos+1 != AsmString.size()) {
if (AsmString[DollarPos+1] == 'n') {
AddLiteralString("\\n");
} else if (AsmString[DollarPos+1] == 't') {
AddLiteralString("\\t");
} else if (std::string("${|}\\").find(AsmString[DollarPos+1])
!= std::string::npos) {
AddLiteralString(std::string(1, AsmString[DollarPos+1]));
} else {
PrintFatalError("Non-supported escaped character found in instruction '" +
CGI.TheDef->getName() + "'!");
}
LastEmitted = DollarPos+2;
continue;
}
} else if (DollarPos+1 != AsmString.size() &&
AsmString[DollarPos+1] == '$') {
AddLiteralString("$"); // "$$" -> $
LastEmitted = DollarPos+2;
} else {
// Get the name of the variable.
std::string::size_type VarEnd = DollarPos+1;
// Handle ${foo}bar as $foo by detecting whether the character following
// the dollar sign is a curly brace. If so, advance VarEnd and DollarPos
// so the variable name does not contain the leading curly brace.
bool hasCurlyBraces = false;
if (VarEnd < AsmString.size() && '{' == AsmString[VarEnd]) {
hasCurlyBraces = true;
++DollarPos;
++VarEnd;
}
while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
++VarEnd;
std::string VarName(AsmString.begin()+DollarPos+1,
AsmString.begin()+VarEnd);
// Modifier - Support ${foo:modifier} syntax, where "modifier" is passed
// into printOperand. Also support ${:feature}, which is passed into
// PrintSpecial.
std::string Modifier;
// In order to avoid starting the next string at the terminating curly
// brace, advance the end position past it if we found an opening curly
// brace.
if (hasCurlyBraces) {
if (VarEnd >= AsmString.size())
PrintFatalError("Reached end of string before terminating curly brace in '"
+ CGI.TheDef->getName() + "'");
// Look for a modifier string.
if (AsmString[VarEnd] == ':') {
++VarEnd;
if (VarEnd >= AsmString.size())
PrintFatalError("Reached end of string before terminating curly brace in '"
+ CGI.TheDef->getName() + "'");
unsigned ModifierStart = VarEnd;
while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
++VarEnd;
Modifier = std::string(AsmString.begin()+ModifierStart,
AsmString.begin()+VarEnd);
if (Modifier.empty())
PrintFatalError("Bad operand modifier name in '"+ CGI.TheDef->getName() + "'");
}
if (AsmString[VarEnd] != '}')
PrintFatalError("Variable name beginning with '{' did not end with '}' in '"
+ CGI.TheDef->getName() + "'");
++VarEnd;
}
if (VarName.empty() && Modifier.empty())
PrintFatalError("Stray '$' in '" + CGI.TheDef->getName() +
"' asm string, maybe you want $$?");
if (VarName.empty()) {
// Just a modifier, pass this into PrintSpecial.
Operands.emplace_back("PrintSpecial", ~0U, ~0U, Modifier,
PassSubtarget);
} else {
// Otherwise, normal operand.
unsigned OpNo = CGI.Operands.getOperandNamed(VarName);
CGIOperandList::OperandInfo OpInfo = CGI.Operands[OpNo];
unsigned MIOp = OpInfo.MIOperandNo;
Operands.emplace_back(OpInfo.PrinterMethodName, OpNo, MIOp, Modifier,
PassSubtarget);
}
LastEmitted = VarEnd;
}
}
Operands.emplace_back("return;", AsmWriterOperand::isLiteralStatementOperand);
}
/// MatchesAllButOneOp - If this instruction is exactly identical to the
/// specified instruction except for one differing operand, return the differing
/// operand number. If more than one operand mismatches, return ~1, otherwise
/// if the instructions are identical return ~0.
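/// For illustration (hypothetical operand lists): comparing operands
/// {A, B, C} against {A, B, D} returns 2 (the single mismatching index);
/// identical lists return ~0U; lists of different lengths, or lists that
/// differ in two or more positions, return ~1U.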
unsigned AsmWriterInst::MatchesAllButOneOp(const AsmWriterInst &Other)const{
if (Operands.size() != Other.Operands.size()) return ~1;
unsigned MismatchOperand = ~0U;
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (Operands[i] != Other.Operands[i]) {
if (MismatchOperand != ~0U) // Already have one mismatch?
return ~1U;
MismatchOperand = i;
}
}
return MismatchOperand;
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelMatcherGen.cpp | //===- DAGISelMatcherGen.cpp - Matcher generator --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
#include "CodeGenRegisters.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <utility>
using namespace llvm;
/// getRegisterValueType - Look up and return the ValueType of the specified
/// register. If the register is a member of multiple register classes which
/// have different associated types, return MVT::Other.
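/// For illustration (hypothetical register classes): a register contained
/// only in classes whose first value type is i32 yields MVT::i32; a register
/// contained in no class yields MVT::Other, and the loop below asserts that
/// all containing classes agree on their first value type.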
static MVT::SimpleValueType getRegisterValueType(Record *R,
const CodeGenTarget &T) {
bool FoundRC = false;
MVT::SimpleValueType VT = MVT::Other;
const CodeGenRegister *Reg = T.getRegBank().getReg(R);
for (const auto &RC : T.getRegBank().getRegClasses()) {
if (!RC.contains(Reg))
continue;
if (!FoundRC) {
FoundRC = true;
VT = RC.getValueTypeNum(0);
continue;
}
// If this occurs in multiple register classes, they all have to agree.
assert(VT == RC.getValueTypeNum(0));
}
return VT;
}
namespace {
class MatcherGen {
const PatternToMatch &Pattern;
const CodeGenDAGPatterns &CGP;
/// PatWithNoTypes - This is a clone of Pattern.getSrcPattern() that starts
/// out with all of the types removed. This allows us to insert type checks
/// as we scan the tree.
TreePatternNode *PatWithNoTypes;
/// VariableMap - A map from variable names ('$dst') to the recorded operand
/// number that they were captured as. These are biased by 1 to make
/// insertion easier.
StringMap<unsigned> VariableMap;
/// This maintains the recorded operand number that OPC_CheckComplexPattern
/// drops each sub-operand into. We don't want to insert these into
/// VariableMap because that leads to identity checking if they are
/// encountered multiple times. Biased by 1 like VariableMap for
/// consistency.
StringMap<unsigned> NamedComplexPatternOperands;
/// NextRecordedOperandNo - As we emit opcodes to record matched values in
/// the RecordedNodes array, this keeps track of which slot will be next to
/// record into.
unsigned NextRecordedOperandNo;
/// MatchedChainNodes - This maintains the position in the recorded nodes
/// array of all of the recorded input nodes that have chains.
SmallVector<unsigned, 2> MatchedChainNodes;
/// MatchedGlueResultNodes - This maintains the position in the recorded
/// nodes array of all of the recorded input nodes that have glue results.
SmallVector<unsigned, 2> MatchedGlueResultNodes;
/// MatchedComplexPatterns - This maintains a list of all of the
/// ComplexPatterns that we need to check. The second element of each pair
/// is the recorded operand number of the input node.
SmallVector<std::pair<const TreePatternNode*,
unsigned>, 2> MatchedComplexPatterns;
/// PhysRegInputs - This list has an entry for each explicitly specified
/// physreg input to the pattern. The first elt is the Register node, the
/// second is the recorded slot number the input pattern match saved it in.
SmallVector<std::pair<Record*, unsigned>, 2> PhysRegInputs;
/// Matcher - This is the top level of the generated matcher, the result.
Matcher *TheMatcher;
/// CurPredicate - As we emit matcher nodes, this points to the latest check
/// which should have future checks stuck into its Next position.
Matcher *CurPredicate;
public:
MatcherGen(const PatternToMatch &pattern, const CodeGenDAGPatterns &cgp);
~MatcherGen() {
delete PatWithNoTypes;
}
bool EmitMatcherCode(unsigned Variant);
void EmitResultCode();
Matcher *GetMatcher() const { return TheMatcher; }
private:
void AddMatcher(Matcher *NewNode);
void InferPossibleTypes();
// Matcher Generation.
void EmitMatchCode(const TreePatternNode *N, TreePatternNode *NodeNoTypes);
void EmitLeafMatchCode(const TreePatternNode *N);
void EmitOperatorMatchCode(const TreePatternNode *N,
TreePatternNode *NodeNoTypes);
/// If this is the first time a node with unique identifier Name has been
/// seen, record it. Otherwise, emit a check to make sure this is the same
/// node. Returns true if this is the first encounter.
bool recordUniqueNode(std::string Name);
// Result Code Generation.
unsigned getNamedArgumentSlot(StringRef Name) {
unsigned VarMapEntry = VariableMap[Name];
assert(VarMapEntry != 0 &&
"Variable referenced but not defined and not caught earlier!");
return VarMapEntry-1;
}
/// GetInstPatternNode - Get the pattern for an instruction.
const TreePatternNode *GetInstPatternNode(const DAGInstruction &Ins,
const TreePatternNode *N);
void EmitResultOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
void EmitResultOfNamedOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
void EmitResultLeafAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
void EmitResultInstructionAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
void EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
};
} // end anon namespace.
MatcherGen::MatcherGen(const PatternToMatch &pattern,
const CodeGenDAGPatterns &cgp)
: Pattern(pattern), CGP(cgp), NextRecordedOperandNo(0),
TheMatcher(nullptr), CurPredicate(nullptr) {
// We need to produce the matcher tree for the pattern's source pattern. To do
// this we need to match the structure as well as the types. To do the type
// matching, we want to figure out the fewest number of type checks we need to
// emit. For example, if there is only one integer type supported by a
// target, there should be no type comparisons at all for integer patterns!
//
// To figure out the fewest number of type checks needed, clone the pattern,
// remove the types, then perform type inference on the pattern as a whole.
// If there are unresolved types, emit an explicit check for those types,
// apply the type to the tree, then rerun type inference. Iterate until all
// types are resolved.
//
PatWithNoTypes = Pattern.getSrcPattern()->clone();
PatWithNoTypes->RemoveAllTypes();
// If there are types that are manifestly known, infer them.
InferPossibleTypes();
}
/// InferPossibleTypes - As we emit the pattern, we end up generating type
/// checks and applying them to the 'PatWithNoTypes' tree. As we do this, we
/// want to propagate implied types as far throughout the tree as possible so
/// that we avoid doing redundant type checks. This does the type propagation.
void MatcherGen::InferPossibleTypes() {
// TP - Get *SOME* tree pattern, we don't care which. It is only used for
// diagnostics, which we know are impossible at this point.
TreePattern &TP = *CGP.pf_begin()->second;
bool MadeChange = true;
while (MadeChange)
MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP,
true/*Ignore reg constraints*/);
}
/// AddMatcher - Add a matcher node to the current graph we're building.
void MatcherGen::AddMatcher(Matcher *NewNode) {
if (CurPredicate)
CurPredicate->setNext(NewNode);
else
TheMatcher = NewNode;
CurPredicate = NewNode;
}
//===----------------------------------------------------------------------===//
// Pattern Match Generation
//===----------------------------------------------------------------------===//
/// EmitLeafMatchCode - Generate matching code for leaf nodes.
void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
assert(N->isLeaf() && "Not a leaf?");
// Direct match against an integer constant.
if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
// If this is the root of the dag we're matching, we emit a redundant opcode
// check to ensure that this gets folded into the normal top-level
// OpcodeSwitch.
if (N == Pattern.getSrcPattern()) {
const SDNodeInfo &NI = CGP.getSDNodeInfo(CGP.getSDNodeNamed("imm"));
AddMatcher(new CheckOpcodeMatcher(NI));
}
return AddMatcher(new CheckIntegerMatcher(II->getValue()));
}
// An UnsetInit represents a named node without any constraints.
if (isa<UnsetInit>(N->getLeafValue())) {
assert(N->hasName() && "Unnamed ? leaf");
return;
}
DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
if (!DI) {
errs() << "Unknown leaf kind: " << *N << "\n";
abort();
}
Record *LeafRec = DI->getDef();
// A ValueType leaf node can represent a register when named, or itself when
// unnamed.
if (LeafRec->isSubClassOf("ValueType")) {
// A named ValueType leaf always matches: (add i32:$a, i32:$b).
if (N->hasName())
return;
// An unnamed ValueType as in (sext_inreg GPR:$foo, i8).
return AddMatcher(new CheckValueTypeMatcher(LeafRec->getName()));
}
if (// Handle register references. Nothing to do here, they always match.
LeafRec->isSubClassOf("RegisterClass") ||
LeafRec->isSubClassOf("RegisterOperand") ||
LeafRec->isSubClassOf("PointerLikeRegClass") ||
LeafRec->isSubClassOf("SubRegIndex") ||
// Placeholder for SRCVALUE nodes. Nothing to do here.
LeafRec->getName() == "srcvalue")
return;
// If we have a physreg reference like (mul gpr:$src, EAX) then we need to
// record the register
if (LeafRec->isSubClassOf("Register")) {
AddMatcher(new RecordMatcher("physreg input "+LeafRec->getName(),
NextRecordedOperandNo));
PhysRegInputs.push_back(std::make_pair(LeafRec, NextRecordedOperandNo++));
return;
}
if (LeafRec->isSubClassOf("CondCode"))
return AddMatcher(new CheckCondCodeMatcher(LeafRec->getName()));
if (LeafRec->isSubClassOf("ComplexPattern")) {
// We can't model ComplexPattern uses that don't have their name taken yet.
// The OPC_CheckComplexPattern operation implicitly records the results.
if (N->getName().empty()) {
std::string S;
raw_string_ostream OS(S);
OS << "We expect complex pattern uses to have names: " << *N;
PrintFatalError(OS.str());
}
// Remember this ComplexPattern so that we can emit it after all the other
// structural matches are done.
unsigned InputOperand = VariableMap[N->getName()] - 1;
MatchedComplexPatterns.push_back(std::make_pair(N, InputOperand));
return;
}
errs() << "Unknown leaf kind: " << *N << "\n";
abort();
}
void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
TreePatternNode *NodeNoTypes) {
assert(!N->isLeaf() && "Not an operator?");
if (N->getOperator()->isSubClassOf("ComplexPattern")) {
// The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
// "MY_PAT:op1:op2". We should already have validated that the uses are
// consistent.
std::string PatternName = N->getOperator()->getName();
for (unsigned i = 0; i < N->getNumChildren(); ++i) {
PatternName += ":";
PatternName += N->getChild(i)->getName();
}
if (recordUniqueNode(PatternName)) {
auto NodeAndOpNum = std::make_pair(N, NextRecordedOperandNo - 1);
MatchedComplexPatterns.push_back(NodeAndOpNum);
}
return;
}
const SDNodeInfo &CInfo = CGP.getSDNodeInfo(N->getOperator());
// If this is an 'and R, 1234' where the operation is AND/OR and the RHS is
// a constant without a predicate fn that has more than one bit set, handle
// this as a special case. This is usually for targets that have special
// handling of certain large constants (e.g. Alpha with its 8/16/32-bit
// handling stuff). Using these instructions is often far more efficient
// than materializing the constant. Unfortunately, both the instcombiner
// and the dag combiner can often infer that bits are dead, and thus drop
// them from the mask in the dag. For example, it might turn 'AND X, 255'
// into 'AND X, 254' if it knows the low bit is set. Emit code that checks
// to handle this.
if ((N->getOperator()->getName() == "and" ||
N->getOperator()->getName() == "or") &&
N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateFns().empty() &&
N->getPredicateFns().empty()) {
if (IntInit *II = dyn_cast<IntInit>(N->getChild(1)->getLeafValue())) {
if (!isPowerOf2_32(II->getValue())) { // Don't bother with single bits.
// If this is at the root of the pattern, we emit a redundant
// CheckOpcode so that the following checks get factored properly under
// a single opcode check.
if (N == Pattern.getSrcPattern())
AddMatcher(new CheckOpcodeMatcher(CInfo));
// Emit the CheckAndImm/CheckOrImm node.
if (N->getOperator()->getName() == "and")
AddMatcher(new CheckAndImmMatcher(II->getValue()));
else
AddMatcher(new CheckOrImmMatcher(II->getValue()));
// Match the LHS of the AND as appropriate.
AddMatcher(new MoveChildMatcher(0));
EmitMatchCode(N->getChild(0), NodeNoTypes->getChild(0));
AddMatcher(new MoveParentMatcher());
return;
}
}
}
// Check that the current opcode lines up.
AddMatcher(new CheckOpcodeMatcher(CInfo));
// If this node has memory references (i.e. is a load or store), tell the
// interpreter to capture them in the memref array.
if (N->NodeHasProperty(SDNPMemOperand, CGP))
AddMatcher(new RecordMemRefMatcher());
// If this node has a chain, then the chain is operand #0 of the SDNode, and
// the child numbers of the node are all offset by one.
unsigned OpNo = 0;
if (N->NodeHasProperty(SDNPHasChain, CGP)) {
// Record the node and remember it in our chained nodes list.
AddMatcher(new RecordMatcher("'" + N->getOperator()->getName() +
"' chained node",
NextRecordedOperandNo));
// Remember all of the input chains our pattern will match.
MatchedChainNodes.push_back(NextRecordedOperandNo++);
// Don't look at the input chain when matching the tree pattern to the
// SDNode.
OpNo = 1;
// If this node is not the root and the subtree underneath it produces a
// chain, then matching the node also produces a chain.
// Beyond that, this means that we're also folding (at least) the root node
// into the node that produces the chain (for example, matching
// "(add reg, (load ptr))" as an add_with_memory on X86). This is
// problematic if the 'reg' node also uses the load (say, its chain).
// Graphically:
//
// [LD]
// ^ ^
// | \ DAG's like cheese.
// / |
// / [YY]
// | ^
// [XX]--/
//
// It would be invalid to fold XX and LD. In this case, folding the two
// nodes together would induce a cycle in the DAG, making it a 'cyclic DAG'.
// To prevent this, we emit a dynamic check for legality before allowing
// this to be folded.
//
const TreePatternNode *Root = Pattern.getSrcPattern();
if (N != Root) { // Not the root of the pattern.
// If there is a node between the root and this node, then we definitely
// need to emit the check.
bool NeedCheck = !Root->hasChild(N);
// If it *is* an immediate child of the root, we may still need a check if
// the root SDNode has multiple inputs. For us, this means that it is an
// intrinsic, has multiple operands, or has other inputs like chain or
// glue.
if (!NeedCheck) {
const SDNodeInfo &PInfo = CGP.getSDNodeInfo(Root->getOperator());
NeedCheck =
Root->getOperator() == CGP.get_intrinsic_void_sdnode() ||
Root->getOperator() == CGP.get_intrinsic_w_chain_sdnode() ||
Root->getOperator() == CGP.get_intrinsic_wo_chain_sdnode() ||
PInfo.getNumOperands() > 1 ||
PInfo.hasProperty(SDNPHasChain) ||
PInfo.hasProperty(SDNPInGlue) ||
PInfo.hasProperty(SDNPOptInGlue);
}
if (NeedCheck)
AddMatcher(new CheckFoldableChainNodeMatcher());
}
}
// If this node has an output glue and isn't the root, remember it.
if (N->NodeHasProperty(SDNPOutGlue, CGP) &&
N != Pattern.getSrcPattern()) {
// TODO: This redundantly records nodes with both glues and chains.
// Record the node and remember it in our chained nodes list.
AddMatcher(new RecordMatcher("'" + N->getOperator()->getName() +
"' glue output node",
NextRecordedOperandNo));
// Remember all of the nodes with output glue our pattern will match.
MatchedGlueResultNodes.push_back(NextRecordedOperandNo++);
}
// If this node is known to have an input glue or if it *might* have an input
// glue, capture it as the glue input of the pattern.
if (N->NodeHasProperty(SDNPOptInGlue, CGP) ||
N->NodeHasProperty(SDNPInGlue, CGP))
AddMatcher(new CaptureGlueInputMatcher());
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i, ++OpNo) {
// Get the code suitable for matching this child. Move to the child, check
// it then move back to the parent.
AddMatcher(new MoveChildMatcher(OpNo));
EmitMatchCode(N->getChild(i), NodeNoTypes->getChild(i));
AddMatcher(new MoveParentMatcher());
}
}
bool MatcherGen::recordUniqueNode(std::string Name) {
unsigned &VarMapEntry = VariableMap[Name];
if (VarMapEntry == 0) {
// If it is a named node, we must emit a 'Record' opcode.
AddMatcher(new RecordMatcher("$" + Name, NextRecordedOperandNo));
VarMapEntry = ++NextRecordedOperandNo;
return true;
}
// If we get here, this is a second reference to a specific name. Since
// we already have checked that the first reference is valid, we don't
// have to recursively match it, just check that it's the same as the
// previously named thing.
AddMatcher(new CheckSameMatcher(VarMapEntry-1));
return false;
}
void MatcherGen::EmitMatchCode(const TreePatternNode *N,
TreePatternNode *NodeNoTypes) {
// If N and NodeNoTypes don't agree on a type, then this is a case where we
// need to do a type check. Emit the check, apply the type to NodeNoTypes and
// reinfer any correlated types.
SmallVector<unsigned, 2> ResultsToTypeCheck;
for (unsigned i = 0, e = NodeNoTypes->getNumTypes(); i != e; ++i) {
if (NodeNoTypes->getExtType(i) == N->getExtType(i)) continue;
NodeNoTypes->setType(i, N->getExtType(i));
InferPossibleTypes();
ResultsToTypeCheck.push_back(i);
}
// If this node has a name associated with it, capture it in VariableMap. If
// we already saw this in the pattern, emit code to verify dagness.
if (!N->getName().empty())
if (!recordUniqueNode(N->getName()))
return;
if (N->isLeaf())
EmitLeafMatchCode(N);
else
EmitOperatorMatchCode(N, NodeNoTypes);
// If there are node predicates for this node, generate their checks.
for (unsigned i = 0, e = N->getPredicateFns().size(); i != e; ++i)
AddMatcher(new CheckPredicateMatcher(N->getPredicateFns()[i]));
for (unsigned i = 0, e = ResultsToTypeCheck.size(); i != e; ++i)
AddMatcher(new CheckTypeMatcher(N->getType(ResultsToTypeCheck[i]),
ResultsToTypeCheck[i]));
}
/// EmitMatcherCode - Generate the code that matches the predicate of this
/// pattern for the specified Variant. If the variant is invalid this returns
/// true and does not generate code, if it is valid, it returns false.
bool MatcherGen::EmitMatcherCode(unsigned Variant) {
// If the root of the pattern is a ComplexPattern and if it is specified to
// match some number of root opcodes, these are considered to be our variants.
// Depending on which variant we're generating code for, emit the root opcode
// check.
if (const ComplexPattern *CP =
Pattern.getSrcPattern()->getComplexPatternInfo(CGP)) {
const std::vector<Record*> &OpNodes = CP->getRootNodes();
assert(!OpNodes.empty() &&"Complex Pattern must specify what it can match");
if (Variant >= OpNodes.size()) return true;
AddMatcher(new CheckOpcodeMatcher(CGP.getSDNodeInfo(OpNodes[Variant])));
} else {
if (Variant != 0) return true;
}
// Emit the matcher for the pattern structure and types.
EmitMatchCode(Pattern.getSrcPattern(), PatWithNoTypes);
// If the pattern has a predicate on it (e.g. only enabled when a subtarget
// feature is around), do the check.
if (!Pattern.getPredicateCheck().empty())
AddMatcher(new CheckPatternPredicateMatcher(Pattern.getPredicateCheck()));
// Now that we've completed the structural type match, emit any ComplexPattern
// checks (e.g. addrmode matches). We emit this after the structural match
// because they are generally more expensive to evaluate and more difficult to
// factor.
for (unsigned i = 0, e = MatchedComplexPatterns.size(); i != e; ++i) {
const TreePatternNode *N = MatchedComplexPatterns[i].first;
// Remember where the results of this match get stuck.
if (N->isLeaf()) {
NamedComplexPatternOperands[N->getName()] = NextRecordedOperandNo + 1;
} else {
unsigned CurOp = NextRecordedOperandNo;
for (unsigned i = 0; i < N->getNumChildren(); ++i) {
NamedComplexPatternOperands[N->getChild(i)->getName()] = CurOp + 1;
CurOp += N->getChild(i)->getNumMIResults(CGP);
}
}
// Get the slot we recorded the value in from the name on the node.
unsigned RecNodeEntry = MatchedComplexPatterns[i].second;
const ComplexPattern &CP = *N->getComplexPatternInfo(CGP);
// Emit a CheckComplexPat operation, which does the match (aborting if it
// fails) and pushes the matched operands onto the recorded nodes list.
AddMatcher(new CheckComplexPatMatcher(CP, RecNodeEntry,
N->getName(), NextRecordedOperandNo));
// Record the right number of operands.
NextRecordedOperandNo += CP.getNumOperands();
if (CP.hasProperty(SDNPHasChain)) {
// If the complex pattern has a chain, then we need to keep track of the
// fact that we just recorded a chain input. The chain input will be
// matched as the last operand of the predicate if it was successful.
++NextRecordedOperandNo; // Chained node operand.
// It is the last operand recorded.
assert(NextRecordedOperandNo > 1 &&
"Should have recorded input/result chains at least!");
MatchedChainNodes.push_back(NextRecordedOperandNo-1);
}
// TODO: Complex patterns can't have output glues, if they did, we'd want
// to record them.
}
return false;
}
//===----------------------------------------------------------------------===//
// Node Result Generation
//===----------------------------------------------------------------------===//
void MatcherGen::EmitResultOfNamedOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps){
assert(!N->getName().empty() && "Operand not named!");
if (unsigned SlotNo = NamedComplexPatternOperands[N->getName()]) {
// Complex operands have already been completely selected; just find the
// right slot and add the arguments directly.
for (unsigned i = 0; i < N->getNumMIResults(CGP); ++i)
ResultOps.push_back(SlotNo - 1 + i);
return;
}
unsigned SlotNo = getNamedArgumentSlot(N->getName());
// If this is an 'imm' or 'fpimm' node, make sure to convert it to the target
// version of the immediate so that it doesn't get selected due to some other
// node use.
if (!N->isLeaf()) {
StringRef OperatorName = N->getOperator()->getName();
if (OperatorName == "imm" || OperatorName == "fpimm") {
AddMatcher(new EmitConvertToTargetMatcher(SlotNo));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
}
for (unsigned i = 0; i < N->getNumMIResults(CGP); ++i)
ResultOps.push_back(SlotNo + i);
}
void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps) {
assert(N->isLeaf() && "Must be a leaf");
if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
// If this is an explicit register reference, handle it.
if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
Record *Def = DI->getDef();
if (Def->isSubClassOf("Register")) {
const CodeGenRegister *Reg =
CGP.getTargetInfo().getRegBank().getReg(Def);
AddMatcher(new EmitRegisterMatcher(Reg, N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
if (Def->getName() == "zero_reg") {
AddMatcher(new EmitRegisterMatcher(nullptr, N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
// Handle a reference to a register class. This is used
// in COPY_TO_SUBREG instructions.
if (Def->isSubClassOf("RegisterOperand"))
Def = Def->getValueAsDef("RegClass");
if (Def->isSubClassOf("RegisterClass")) {
std::string Value = getQualifiedName(Def) + "RegClassID";
AddMatcher(new EmitStringIntegerMatcher(Value, MVT::i32));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
// Handle a subregister index. This is used for INSERT_SUBREG etc.
if (Def->isSubClassOf("SubRegIndex")) {
std::string Value = getQualifiedName(Def);
AddMatcher(new EmitStringIntegerMatcher(Value, MVT::i32));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
}
errs() << "unhandled leaf node: \n";
N->dump();
}
/// GetInstPatternNode - Get the pattern for an instruction.
///
const TreePatternNode *MatcherGen::
GetInstPatternNode(const DAGInstruction &Inst, const TreePatternNode *N) {
const TreePattern *InstPat = Inst.getPattern();
// FIXME2?: Assume actual pattern comes before "implicit".
TreePatternNode *InstPatNode;
if (InstPat)
InstPatNode = InstPat->getTree(0);
else if (/*isRoot*/ N == Pattern.getDstPattern())
InstPatNode = Pattern.getSrcPattern();
else
return nullptr;
if (InstPatNode && !InstPatNode->isLeaf() &&
InstPatNode->getOperator()->getName() == "set")
InstPatNode = InstPatNode->getChild(InstPatNode->getNumChildren()-1);
return InstPatNode;
}
static bool
mayInstNodeLoadOrStore(const TreePatternNode *N,
const CodeGenDAGPatterns &CGP) {
Record *Op = N->getOperator();
const CodeGenTarget &CGT = CGP.getTargetInfo();
CodeGenInstruction &II = CGT.getInstruction(Op);
return II.mayLoad || II.mayStore;
}
static unsigned
numNodesThatMayLoadOrStore(const TreePatternNode *N,
const CodeGenDAGPatterns &CGP) {
if (N->isLeaf())
return 0;
Record *OpRec = N->getOperator();
if (!OpRec->isSubClassOf("Instruction"))
return 0;
unsigned Count = 0;
if (mayInstNodeLoadOrStore(N, CGP))
++Count;
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
Count += numNodesThatMayLoadOrStore(N->getChild(i), CGP);
return Count;
}
void MatcherGen::
EmitResultInstructionAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &OutputOps) {
Record *Op = N->getOperator();
const CodeGenTarget &CGT = CGP.getTargetInfo();
CodeGenInstruction &II = CGT.getInstruction(Op);
const DAGInstruction &Inst = CGP.getInstruction(Op);
// If we can, get the pattern for the instruction we're generating. We derive
// a variety of information from this pattern, such as whether it has a chain.
//
// FIXME2: This is extremely dubious for several reasons, not the least of
// which it gives special status to instructions with patterns that Pat<>
// nodes can't duplicate.
const TreePatternNode *InstPatNode = GetInstPatternNode(Inst, N);
// NodeHasChain - Whether the instruction node we're creating takes chains.
bool NodeHasChain = InstPatNode &&
InstPatNode->TreeHasProperty(SDNPHasChain, CGP);
// Instructions which load and store from memory should have a chain,
// regardless of whether they happen to have an internal pattern saying so.
if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP)
&& (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
II.hasSideEffects))
NodeHasChain = true;
bool isRoot = N == Pattern.getDstPattern();
// TreeHasOutGlue - True if this tree has glue.
bool TreeHasInGlue = false, TreeHasOutGlue = false;
if (isRoot) {
const TreePatternNode *SrcPat = Pattern.getSrcPattern();
TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
SrcPat->TreeHasProperty(SDNPInGlue, CGP);
// FIXME2: this is checking the entire pattern, not just the node in
// question, doing this just for the root seems like a total hack.
TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
}
// NumResults - This is the number of results produced by the instruction in
// the "outs" list.
unsigned NumResults = Inst.getNumResults();
// Number of operands we know the output instruction must have. If it is
// variadic, we could have more operands.
unsigned NumFixedOperands = II.Operands.size();
SmallVector<unsigned, 8> InstOps;
// Loop over all of the fixed operands of the instruction pattern, emitting
// code to fill them all in. The node 'N' usually has a number of children equal to
// the number of input operands of the instruction. However, in cases where
// there are predicate operands for an instruction, we need to fill in the
// 'execute always' values. Match up the node operands to the instruction
// operands to do this.
unsigned ChildNo = 0;
for (unsigned InstOpNo = NumResults, e = NumFixedOperands;
InstOpNo != e; ++InstOpNo) {
// Determine what to emit for this operand.
Record *OperandNode = II.Operands[InstOpNo].Rec;
if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
!CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
// This is a predicate or optional def operand; emit the
// 'default ops' operands.
const DAGDefaultOperand &DefaultOp
= CGP.getDefaultOperand(OperandNode);
for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
continue;
}
// Otherwise this is a normal operand or a predicate operand without
// 'execute always'; emit it.
// For operands with multiple sub-operands we may need to emit
// multiple child patterns to cover them all. However, ComplexPattern
// children may themselves emit multiple MI operands.
unsigned NumSubOps = 1;
if (OperandNode->isSubClassOf("Operand")) {
DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
if (unsigned NumArgs = MIOpInfo->getNumArgs())
NumSubOps = NumArgs;
}
unsigned FinalNumOps = InstOps.size() + NumSubOps;
while (InstOps.size() < FinalNumOps) {
const TreePatternNode *Child = N->getChild(ChildNo);
unsigned BeforeAddingNumOps = InstOps.size();
EmitResultOperand(Child, InstOps);
assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");
// If the operand is an instruction and it produced multiple results, just
// take the first one.
if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
InstOps.resize(BeforeAddingNumOps+1);
++ChildNo;
}
}
// If this is a variadic output instruction (i.e. REG_SEQUENCE), we can't
// expand suboperands, use default operands, or other features determined from
// the CodeGenInstruction after the fixed operands, which were handled
// above. Emit the remaining operands implicitly added by the use of
// variable_ops.
if (II.Operands.isVariadic) {
for (unsigned I = ChildNo, E = N->getNumChildren(); I < E; ++I)
EmitResultOperand(N->getChild(I), InstOps);
}
// If this node has input glue or explicitly specified input physregs, we
// need to add chained and glued copyfromreg nodes and materialize the glue
// input.
if (isRoot && !PhysRegInputs.empty()) {
// Emit all of the CopyToReg nodes for the input physical registers. These
// occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
PhysRegInputs[i].first));
// Even if the node has no other glue inputs, the resultant node must be
// glued to the CopyFromReg nodes we just generated.
TreeHasInGlue = true;
}
// Result order: node results, chain, glue
// Determine the result types.
SmallVector<MVT::SimpleValueType, 4> ResultVTs;
for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
ResultVTs.push_back(N->getType(i));
// If this is the root instruction of a pattern that has physical registers in
// its result pattern, add output VTs for them. For example, X86 has:
// (set AL, (mul ...))
// This also handles implicit results like:
// (implicit EFLAGS)
if (isRoot && !Pattern.getDstRegs().empty()) {
// If the root came from an implicit def in the instruction handling stuff,
// don't re-add it.
Record *HandledReg = nullptr;
if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
HandledReg = II.ImplicitDefs[0];
for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
Record *Reg = Pattern.getDstRegs()[i];
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
ResultVTs.push_back(getRegisterValueType(Reg, CGT));
}
}
// If this is the root of the pattern and the pattern we're matching includes
// a node that is variadic, mark the generated node as variadic so that it
// gets the excess operands from the input DAG.
int NumFixedArityOperands = -1;
if (isRoot &&
Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP))
NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();
// If this is the root node and multiple matched nodes in the input pattern
// have MemRefs in them, have the interpreter collect them and plop them onto
// this node. If there is just one node with MemRefs, leave them on that node
// even if it is not the root.
//
// FIXME3: This is actively incorrect for result patterns with multiple
// memory-referencing instructions.
bool PatternHasMemOperands =
Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);
bool NodeHasMemRefs = false;
if (PatternHasMemOperands) {
unsigned NumNodesThatLoadOrStore =
numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
NumNodesThatLoadOrStore == 1;
NodeHasMemRefs =
NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
NumNodesThatLoadOrStore != 1));
}
assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
"Node has no result");
AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
ResultVTs, InstOps,
NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
NodeHasMemRefs, NumFixedArityOperands,
NextRecordedOperandNo));
// The non-chain and non-glue results of the newly emitted node get recorded.
for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
OutputOps.push_back(NextRecordedOperandNo++);
}
}
void MatcherGen::
EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps) {
assert(N->getOperator()->isSubClassOf("SDNodeXForm") && "Not SDNodeXForm?");
// Emit the operand.
SmallVector<unsigned, 8> InputOps;
// FIXME2: Could easily generalize this to support multiple inputs and outputs
// to the SDNodeXForm. For now we just support one input and one output like
// the old instruction selector.
assert(N->getNumChildren() == 1);
EmitResultOperand(N->getChild(0), InputOps);
// The input currently must have produced exactly one result.
assert(InputOps.size() == 1 && "Unexpected input to SDNodeXForm");
AddMatcher(new EmitNodeXFormMatcher(InputOps[0], N->getOperator()));
ResultOps.push_back(NextRecordedOperandNo++);
}
void MatcherGen::EmitResultOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps) {
// This is something selected from the pattern we matched.
if (!N->getName().empty())
return EmitResultOfNamedOperand(N, ResultOps);
if (N->isLeaf())
return EmitResultLeafAsOperand(N, ResultOps);
Record *OpRec = N->getOperator();
if (OpRec->isSubClassOf("Instruction"))
return EmitResultInstructionAsOperand(N, ResultOps);
if (OpRec->isSubClassOf("SDNodeXForm"))
return EmitResultSDNodeXFormAsOperand(N, ResultOps);
errs() << "Unknown result node to emit code for: " << *N << '\n';
PrintFatalError("Unknown node in result pattern!");
}
void MatcherGen::EmitResultCode() {
// Patterns that match nodes with (potentially multiple) chain inputs have to
// merge them together into a token factor. This informs the generated code
// what all the chained nodes are.
if (!MatchedChainNodes.empty())
AddMatcher(new EmitMergeInputChainsMatcher(MatchedChainNodes));
// Codegen the root of the result pattern, capturing the resulting values.
SmallVector<unsigned, 8> Ops;
EmitResultOperand(Pattern.getDstPattern(), Ops);
// At this point, we have however many values the result pattern produces.
// However, the input pattern might not need all of these. If there are
// excess values at the end (such as implicit defs of condition codes etc)
// just lop them off. This doesn't need to worry about glue or chains, just
// explicit results.
//
unsigned NumSrcResults = Pattern.getSrcPattern()->getNumTypes();
// If the pattern also has (implicit) results, count them as well.
if (!Pattern.getDstRegs().empty()) {
// If the root came from an implicit def in the instruction handling stuff,
// don't re-add it.
Record *HandledReg = nullptr;
const TreePatternNode *DstPat = Pattern.getDstPattern();
if (!DstPat->isLeaf() && DstPat->getOperator()->isSubClassOf("Instruction")) {
const CodeGenTarget &CGT = CGP.getTargetInfo();
CodeGenInstruction &II = CGT.getInstruction(DstPat->getOperator());
if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
HandledReg = II.ImplicitDefs[0];
}
for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
Record *Reg = Pattern.getDstRegs()[i];
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
++NumSrcResults;
}
}
assert(Ops.size() >= NumSrcResults && "Didn't provide enough results");
Ops.resize(NumSrcResults);
// If the matched pattern covers nodes which define a glue result, emit a node
// that tells the matcher about them so that it can update their results.
if (!MatchedGlueResultNodes.empty())
AddMatcher(new MarkGlueResultsMatcher(MatchedGlueResultNodes));
AddMatcher(new CompleteMatchMatcher(Ops, Pattern));
}
/// ConvertPatternToMatcher - Create the matcher for the specified pattern with
/// the specified variant. If the variant number is invalid, this returns null.
Matcher *llvm::ConvertPatternToMatcher(const PatternToMatch &Pattern,
unsigned Variant,
const CodeGenDAGPatterns &CGP) {
MatcherGen Gen(Pattern, CGP);
// Generate the code for the matcher.
if (Gen.EmitMatcherCode(Variant))
return nullptr;
// FIXME2: Kill extra MoveParent commands at the end of the matcher sequence.
// FIXME2: Split result code out to another table, and make the matcher end
// with an "Emit <index>" command. This allows result generation stuff to be
// shared and factored?
// If the match succeeds, then we generate Pattern.
Gen.EmitResultCode();
// Unconditional match.
return Gen.GetMatcher();
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelMatcher.cpp | //===- DAGISelMatcher.cpp - Representation of DAG pattern matcher ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
void Matcher::anchor() { }
void Matcher::dump() const {
print(errs(), 0);
}
void Matcher::print(raw_ostream &OS, unsigned indent) const {
printImpl(OS, indent);
if (Next)
return Next->print(OS, indent);
}
void Matcher::printOne(raw_ostream &OS) const {
printImpl(OS, 0);
}
/// unlinkNode - Unlink the specified node from this chain. If Other == this,
/// we unlink the next pointer and return it. Otherwise we unlink Other from
/// the list and return this.
Matcher *Matcher::unlinkNode(Matcher *Other) {
if (this == Other)
return takeNext();
// Scan until we find the predecessor of Other.
Matcher *Cur = this;
for (; Cur && Cur->getNext() != Other; Cur = Cur->getNext())
/*empty*/;
if (!Cur) return nullptr;
Cur->takeNext();
Cur->setNext(Other->takeNext());
return this;
}
/// canMoveBefore - Return true if this matcher is the same as Other, or if
/// we can move this matcher past all of the nodes in-between Other and this
/// node. Other must be equal to or before this.
bool Matcher::canMoveBefore(const Matcher *Other) const {
for (;; Other = Other->getNext()) {
assert(Other && "Other didn't come before 'this'?");
if (this == Other) return true;
// We have to be able to move this node across the Other node.
if (!canMoveBeforeNode(Other))
return false;
}
}
/// canMoveBeforeNode - Return true if it is safe to move the current matcher
/// across the specified one.
bool Matcher::canMoveBeforeNode(const Matcher *Other) const {
// We can move simple predicates before record nodes.
if (isSimplePredicateNode())
return Other->isSimplePredicateOrRecordNode();
// We can move record nodes across simple predicates.
if (isSimplePredicateOrRecordNode())
return isSimplePredicateNode();
// We can't move record nodes across each other etc.
return false;
}
ScopeMatcher::~ScopeMatcher() {
for (unsigned i = 0, e = Children.size(); i != e; ++i)
delete Children[i];
}
SwitchOpcodeMatcher::~SwitchOpcodeMatcher() {
for (unsigned i = 0, e = Cases.size(); i != e; ++i)
delete Cases[i].second;
}
SwitchTypeMatcher::~SwitchTypeMatcher() {
for (unsigned i = 0, e = Cases.size(); i != e; ++i)
delete Cases[i].second;
}
CheckPredicateMatcher::CheckPredicateMatcher(const TreePredicateFn &pred)
: Matcher(CheckPredicate), Pred(pred.getOrigPatFragRecord()) {}
TreePredicateFn CheckPredicateMatcher::getPredicate() const {
return TreePredicateFn(Pred);
}
// printImpl methods.
void ScopeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "Scope\n";
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
if (!getChild(i))
OS.indent(indent+1) << "NULL POINTER\n";
else
getChild(i)->print(OS, indent+2);
}
}
void RecordMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "Record\n";
}
void RecordChildMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "RecordChild: " << ChildNo << '\n';
}
void RecordMemRefMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "RecordMemRef\n";
}
void CaptureGlueInputMatcher::printImpl(raw_ostream &OS, unsigned indent) const{
OS.indent(indent) << "CaptureGlueInput\n";
}
void MoveChildMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "MoveChild " << ChildNo << '\n';
}
void MoveParentMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "MoveParent\n";
}
void CheckSameMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckSame " << MatchNumber << '\n';
}
void CheckChildSameMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckChild" << ChildNo << "Same\n";
}
void CheckPatternPredicateMatcher::
printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckPatternPredicate " << Predicate << '\n';
}
void CheckPredicateMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckPredicate " << getPredicate().getFnName() << '\n';
}
void CheckOpcodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckOpcode " << Opcode.getEnumName() << '\n';
}
void SwitchOpcodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "SwitchOpcode: {\n";
for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
OS.indent(indent) << "case " << Cases[i].first->getEnumName() << ":\n";
Cases[i].second->print(OS, indent+2);
}
OS.indent(indent) << "}\n";
}
void CheckTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckType " << getEnumName(Type) << ", ResNo="
<< ResNo << '\n';
}
void SwitchTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "SwitchType: {\n";
for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
OS.indent(indent) << "case " << getEnumName(Cases[i].first) << ":\n";
Cases[i].second->print(OS, indent+2);
}
OS.indent(indent) << "}\n";
}
void CheckChildTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckChildType " << ChildNo << " "
<< getEnumName(Type) << '\n';
}
void CheckIntegerMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckInteger " << Value << '\n';
}
void CheckChildIntegerMatcher::printImpl(raw_ostream &OS,
unsigned indent) const {
OS.indent(indent) << "CheckChildInteger " << ChildNo << " " << Value << '\n';
}
void CheckCondCodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckCondCode ISD::" << CondCodeName << '\n';
}
void CheckValueTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckValueType MVT::" << TypeName << '\n';
}
void CheckComplexPatMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckComplexPat " << Pattern.getSelectFunc() << '\n';
}
void CheckAndImmMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckAndImm " << Value << '\n';
}
void CheckOrImmMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CheckOrImm " << Value << '\n';
}
void CheckFoldableChainNodeMatcher::printImpl(raw_ostream &OS,
unsigned indent) const {
OS.indent(indent) << "CheckFoldableChainNode\n";
}
void EmitIntegerMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitInteger " << Val << " VT=" << VT << '\n';
}
void EmitStringIntegerMatcher::
printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitStringInteger " << Val << " VT=" << VT << '\n';
}
void EmitRegisterMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitRegister ";
if (Reg)
OS << Reg->getName();
else
OS << "zero_reg";
OS << " VT=" << VT << '\n';
}
void EmitConvertToTargetMatcher::
printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitConvertToTarget " << Slot << '\n';
}
void EmitMergeInputChainsMatcher::
printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitMergeInputChains <todo: args>\n";
}
void EmitCopyToRegMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitCopyToReg <todo: args>\n";
}
void EmitNodeXFormMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "EmitNodeXForm " << NodeXForm->getName()
<< " Slot=" << Slot << '\n';
}
void EmitNodeMatcherCommon::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent);
OS << (isa<MorphNodeToMatcher>(this) ? "MorphNodeTo: " : "EmitNode: ")
<< OpcodeName << ": <todo flags> ";
for (unsigned i = 0, e = VTs.size(); i != e; ++i)
OS << ' ' << getEnumName(VTs[i]);
OS << '(';
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
OS << Operands[i] << ' ';
OS << ")\n";
}
void MarkGlueResultsMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "MarkGlueResults <todo: args>\n";
}
void CompleteMatchMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "CompleteMatch <todo args>\n";
OS.indent(indent) << "Src = " << *Pattern.getSrcPattern() << "\n";
OS.indent(indent) << "Dst = " << *Pattern.getDstPattern() << "\n";
}
// getHashImpl Implementation.
unsigned CheckPatternPredicateMatcher::getHashImpl() const {
return HashString(Predicate);
}
unsigned CheckPredicateMatcher::getHashImpl() const {
return HashString(getPredicate().getFnName());
}
unsigned CheckOpcodeMatcher::getHashImpl() const {
return HashString(Opcode.getEnumName());
}
unsigned CheckCondCodeMatcher::getHashImpl() const {
return HashString(CondCodeName);
}
unsigned CheckValueTypeMatcher::getHashImpl() const {
return HashString(TypeName);
}
unsigned EmitStringIntegerMatcher::getHashImpl() const {
return HashString(Val) ^ VT;
}
template<typename It>
static unsigned HashUnsigneds(It I, It E) {
unsigned Result = 0;
for (; I != E; ++I)
Result = (Result<<3) ^ *I;
return Result;
}
unsigned EmitMergeInputChainsMatcher::getHashImpl() const {
return HashUnsigneds(ChainNodes.begin(), ChainNodes.end());
}
bool CheckOpcodeMatcher::isEqualImpl(const Matcher *M) const {
// Note: pointer equality isn't enough here, we have to check the enum names
// to ensure that the nodes are for the same opcode.
return cast<CheckOpcodeMatcher>(M)->Opcode.getEnumName() ==
Opcode.getEnumName();
}
bool EmitNodeMatcherCommon::isEqualImpl(const Matcher *m) const {
const EmitNodeMatcherCommon *M = cast<EmitNodeMatcherCommon>(m);
return M->OpcodeName == OpcodeName && M->VTs == VTs &&
M->Operands == Operands && M->HasChain == HasChain &&
M->HasInGlue == HasInGlue && M->HasOutGlue == HasOutGlue &&
M->HasMemRefs == HasMemRefs &&
M->NumFixedArityOperands == NumFixedArityOperands;
}
unsigned EmitNodeMatcherCommon::getHashImpl() const {
return (HashString(OpcodeName) << 4) | Operands.size();
}
void EmitNodeMatcher::anchor() { }
void MorphNodeToMatcher::anchor() { }
unsigned MarkGlueResultsMatcher::getHashImpl() const {
return HashUnsigneds(GlueResultNodes.begin(), GlueResultNodes.end());
}
unsigned CompleteMatchMatcher::getHashImpl() const {
return HashUnsigneds(Results.begin(), Results.end()) ^
((unsigned)(intptr_t)&Pattern << 8);
}
// isContradictoryImpl Implementations.
static bool TypesAreContradictory(MVT::SimpleValueType T1,
MVT::SimpleValueType T2) {
// If the two types are the same, they don't contradict.
if (T1 == T2) return false;
// If either type is iPTR, the two conflict only when the other one is not a
// scalar integer type.
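// For example, iPTR does not contradict i32 or i64, but it does contradict
// f32 and v4i32.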
if (T1 == MVT::iPTR)
return !MVT(T2).isInteger() || MVT(T2).isVector();
if (T2 == MVT::iPTR)
return !MVT(T1).isInteger() || MVT(T1).isVector();
// Otherwise, they are two different non-iPTR types, they conflict.
return true;
}
bool CheckOpcodeMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckOpcodeMatcher *COM = dyn_cast<CheckOpcodeMatcher>(M)) {
// One node can't have two different opcodes!
// Note: pointer equality isn't enough here, we have to check the enum names
// to ensure that the nodes are for the same opcode.
return COM->getOpcode().getEnumName() != getOpcode().getEnumName();
}
// If the node has a known type, and if the type we're checking for is
// different, then we know they contradict. For example, a check for
// ISD::STORE will never be true at the same time a check for Type i32 is.
if (const CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(M)) {
// If checking for a result the opcode doesn't have, it can't match.
if (CT->getResNo() >= getOpcode().getNumResults())
return true;
MVT::SimpleValueType NodeType = getOpcode().getKnownType(CT->getResNo());
if (NodeType != MVT::Other)
return TypesAreContradictory(NodeType, CT->getType());
}
return false;
}
bool CheckTypeMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(M))
return TypesAreContradictory(getType(), CT->getType());
return false;
}
bool CheckChildTypeMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckChildTypeMatcher *CC = dyn_cast<CheckChildTypeMatcher>(M)) {
// If the two checks are about different nodes, we don't know if they
// conflict!
if (CC->getChildNo() != getChildNo())
return false;
return TypesAreContradictory(getType(), CC->getType());
}
return false;
}
bool CheckIntegerMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckIntegerMatcher *CIM = dyn_cast<CheckIntegerMatcher>(M))
return CIM->getValue() != getValue();
return false;
}
bool CheckChildIntegerMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckChildIntegerMatcher *CCIM = dyn_cast<CheckChildIntegerMatcher>(M)) {
// If the two checks are about different nodes, we don't know if they
// conflict!
if (CCIM->getChildNo() != getChildNo())
return false;
return CCIM->getValue() != getValue();
}
return false;
}
bool CheckValueTypeMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckValueTypeMatcher *CVT = dyn_cast<CheckValueTypeMatcher>(M))
return CVT->getTypeName() != getTypeName();
return false;
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenSchedule.h | //===- CodeGenSchedule.h - Scheduling Machine Models ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_CODEGENSCHEDULE_H
#define LLVM_UTILS_TABLEGEN_CODEGENSCHEDULE_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/SetTheory.h"
namespace llvm {
class CodeGenTarget;
class CodeGenSchedModels;
class CodeGenInstruction;
typedef std::vector<Record*> RecVec;
typedef std::vector<Record*>::const_iterator RecIter;
typedef std::vector<unsigned> IdxVec;
typedef std::vector<unsigned>::const_iterator IdxIter;
void splitSchedReadWrites(const RecVec &RWDefs,
RecVec &WriteDefs, RecVec &ReadDefs);
/// We have two kinds of SchedReadWrites. Explicitly defined and inferred
/// sequences. TheDef is nonnull for explicit SchedWrites, but Sequence may or
/// may not be empty. TheDef is null for inferred sequences, and Sequence must
/// be nonempty.
///
/// IsVariadic controls whether the variants are expanded into multiple operands
/// or a sequence of writes on one operand.
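///
/// As a rough TableGen-level illustration (the names are hypothetical), an
/// explicit sequence might be declared as
///   def WriteMul2 : WriteSequence<[WriteMul, WriteMul]>;
/// whereas an inferred sequence is created internally during variant expansion
/// and has a null TheDef but a nonempty Sequence of indices.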
struct CodeGenSchedRW {
unsigned Index;
std::string Name;
Record *TheDef;
bool IsRead;
bool IsAlias;
bool HasVariants;
bool IsVariadic;
bool IsSequence;
IdxVec Sequence;
RecVec Aliases;
CodeGenSchedRW()
: Index(0), TheDef(nullptr), IsRead(false), IsAlias(false),
HasVariants(false), IsVariadic(false), IsSequence(false) {}
CodeGenSchedRW(unsigned Idx, Record *Def)
: Index(Idx), TheDef(Def), IsAlias(false), IsVariadic(false) {
Name = Def->getName();
IsRead = Def->isSubClassOf("SchedRead");
HasVariants = Def->isSubClassOf("SchedVariant");
if (HasVariants)
IsVariadic = Def->getValueAsBit("Variadic");
// Read records don't currently have sequences, but they could easily be
// added. Note that implicit Reads (from ReadVariant) may have a Sequence
// (but no record).
IsSequence = Def->isSubClassOf("WriteSequence");
}
CodeGenSchedRW(unsigned Idx, bool Read, const IdxVec &Seq,
const std::string &Name)
: Index(Idx), Name(Name), TheDef(nullptr), IsRead(Read), IsAlias(false),
HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) {
assert(Sequence.size() > 1 && "implied sequence needs >1 RWs");
}
bool isValid() const {
assert((!HasVariants || TheDef) && "Variant write needs record def");
assert((!IsVariadic || HasVariants) && "Variadic write needs variants");
assert((!IsSequence || !HasVariants) && "Sequence can't have variant");
assert((!IsSequence || !Sequence.empty()) && "Sequence should be nonempty");
assert((!IsAlias || Aliases.empty()) && "Alias cannot have aliases");
return TheDef || !Sequence.empty();
}
#ifndef NDEBUG
void dump() const;
#endif
};
/// Represent a transition between SchedClasses induced by SchedVariant.
struct CodeGenSchedTransition {
unsigned ToClassIdx;
IdxVec ProcIndices;
RecVec PredTerm;
};
/// Scheduling class.
///
/// Each instruction description will be mapped to a scheduling class. There are
/// four types of classes:
///
/// 1) An explicitly defined itinerary class with ItinClassDef set.
/// Writes and ReadDefs are empty. ProcIndices contains 0 for any processor.
///
/// 2) An implied class with a list of SchedWrites and SchedReads that are
/// defined in an instruction definition and which are common across all
/// subtargets. ProcIndices contains 0 for any processor.
///
/// 3) An implied class with a list of InstRW records that map instructions to
/// SchedWrites and SchedReads per-processor. InstrClassMap should map the same
/// instructions to this class. ProcIndices contains all the processors that
/// provided InstrRW records for this class. ItinClassDef or Writes/Reads may
/// still be defined for processors with no InstRW entry.
///
/// 4) An inferred class represents a variant of another class that may be
/// resolved at runtime. ProcIndices contains the set of processors that may
/// require the class. ProcIndices are propagated through SchedClasses as
/// variants are expanded. Multiple SchedClasses may be inferred from an
/// itinerary class. Each inherits the processor index from the ItinRW record
/// that mapped the itinerary class to the variant Writes or Reads.
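///
/// As a rough TableGen-level sketch of case 3 (record names are hypothetical):
///
///   let SchedModel = MyProcModel in
///   def : InstRW<[MyWriteALU], (instrs ADD, SUB)>;
///
/// maps ADD and SUB to a per-processor class carrying that InstRW record, while
/// other processors keep whatever itinerary or SchedRW-based class those
/// instructions already had.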
struct CodeGenSchedClass {
unsigned Index;
std::string Name;
Record *ItinClassDef;
IdxVec Writes;
IdxVec Reads;
// Sorted list of ProcIdx, where ProcIdx==0 implies any processor.
IdxVec ProcIndices;
std::vector<CodeGenSchedTransition> Transitions;
// InstRW records associated with this class. These records may refer to an
// Instruction no longer mapped to this class by InstrClassMap. These
// Instructions should be ignored by this class because they have been split
// off to join another inferred class.
RecVec InstRWs;
CodeGenSchedClass(): Index(0), ItinClassDef(nullptr) {}
bool isKeyEqual(Record *IC, const IdxVec &W, const IdxVec &R) {
return ItinClassDef == IC && Writes == W && Reads == R;
}
// Is this class generated from variants of existing classes? Instructions
// are never mapped directly to inferred scheduling classes.
bool isInferred() const { return !ItinClassDef; }
#ifndef NDEBUG
void dump(const CodeGenSchedModels *SchedModels) const;
#endif
};
// Processor model.
//
// ModelName is a unique name used to name an instantiation of MCSchedModel.
//
// ModelDef is NULL for inferred Models. This happens when a processor defines
// an itinerary but no machine model. If the processor defines neither a machine
// model nor itinerary, then ModelDef remains pointing to NoModel. NoModel has
// the special "NoModel" field set to true.
//
// ItinsDef always points to a valid record definition, but may point to the
// default NoItineraries. NoItineraries has an empty list of InstrItinData
// records.
//
// ItinDefList orders this processor's InstrItinData records by SchedClass idx.
struct CodeGenProcModel {
unsigned Index;
std::string ModelName;
Record *ModelDef;
Record *ItinsDef;
// Derived members...
// Array of InstrItinData records indexed by a CodeGenSchedClass index.
// This list is empty if the Processor has no value for Itineraries.
// Initialized by collectProcItins().
RecVec ItinDefList;
// Map itinerary classes to per-operand resources.
// This list is empty if no ItinRW refers to this Processor.
RecVec ItinRWDefs;
// All read/write resources associated with this processor.
RecVec WriteResDefs;
RecVec ReadAdvanceDefs;
// Per-operand machine model resources associated with this processor.
RecVec ProcResourceDefs;
RecVec ProcResGroupDefs;
CodeGenProcModel(unsigned Idx, const std::string &Name, Record *MDef,
Record *IDef) :
Index(Idx), ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
bool hasItineraries() const {
return !ItinsDef->getValueAsListOfDefs("IID").empty();
}
bool hasInstrSchedModel() const {
return !WriteResDefs.empty() || !ItinRWDefs.empty();
}
unsigned getProcResourceIdx(Record *PRDef) const;
#ifndef NDEBUG
void dump() const;
#endif
};
/// Top level container for machine model data.
class CodeGenSchedModels {
RecordKeeper &Records;
const CodeGenTarget &Target;
// Map dag expressions to Instruction lists.
SetTheory Sets;
// List of unique processor models.
std::vector<CodeGenProcModel> ProcModels;
// Map Processor's MachineModel or ProcItin to a CodeGenProcModel index.
typedef DenseMap<Record*, unsigned> ProcModelMapTy;
ProcModelMapTy ProcModelMap;
// Per-operand SchedReadWrite types.
std::vector<CodeGenSchedRW> SchedWrites;
std::vector<CodeGenSchedRW> SchedReads;
// List of unique SchedClasses.
std::vector<CodeGenSchedClass> SchedClasses;
// Any inferred SchedClass has an index greater than NumInstrSchedClasses.
unsigned NumInstrSchedClasses;
// Map each instruction to its unique SchedClass index considering the
// combination of its itinerary class, SchedRW list, and InstRW records.
typedef DenseMap<Record*, unsigned> InstClassMapTy;
InstClassMapTy InstrClassMap;
public:
CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT);
// iterator access to the scheduling classes.
typedef std::vector<CodeGenSchedClass>::iterator class_iterator;
typedef std::vector<CodeGenSchedClass>::const_iterator const_class_iterator;
class_iterator classes_begin() { return SchedClasses.begin(); }
const_class_iterator classes_begin() const { return SchedClasses.begin(); }
class_iterator classes_end() { return SchedClasses.end(); }
const_class_iterator classes_end() const { return SchedClasses.end(); }
iterator_range<class_iterator> classes() {
return iterator_range<class_iterator>(classes_begin(), classes_end());
}
iterator_range<const_class_iterator> classes() const {
return iterator_range<const_class_iterator>(classes_begin(), classes_end());
}
iterator_range<class_iterator> explicit_classes() {
return iterator_range<class_iterator>(
classes_begin(), classes_begin() + NumInstrSchedClasses);
}
iterator_range<const_class_iterator> explicit_classes() const {
return iterator_range<const_class_iterator>(
classes_begin(), classes_begin() + NumInstrSchedClasses);
}
Record *getModelOrItinDef(Record *ProcDef) const {
Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
if (!ItinsDef->getValueAsListOfDefs("IID").empty()) {
assert(ModelDef->getValueAsBit("NoModel")
&& "Itineraries must be defined within SchedMachineModel");
return ItinsDef;
}
return ModelDef;
}
const CodeGenProcModel &getModelForProc(Record *ProcDef) const {
Record *ModelDef = getModelOrItinDef(ProcDef);
ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
assert(I != ProcModelMap.end() && "missing machine model");
return ProcModels[I->second];
}
CodeGenProcModel &getProcModel(Record *ModelDef) {
ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
assert(I != ProcModelMap.end() && "missing machine model");
return ProcModels[I->second];
}
const CodeGenProcModel &getProcModel(Record *ModelDef) const {
return const_cast<CodeGenSchedModels*>(this)->getProcModel(ModelDef);
}
// Iterate over the unique processor models.
typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
ProcIter procModelBegin() const { return ProcModels.begin(); }
ProcIter procModelEnd() const { return ProcModels.end(); }
// Return true if any processors have itineraries.
bool hasItineraries() const;
// Get a SchedWrite from its index.
const CodeGenSchedRW &getSchedWrite(unsigned Idx) const {
assert(Idx < SchedWrites.size() && "bad SchedWrite index");
assert(SchedWrites[Idx].isValid() && "invalid SchedWrite");
return SchedWrites[Idx];
}
// Get a SchedRead from its index.
const CodeGenSchedRW &getSchedRead(unsigned Idx) const {
assert(Idx < SchedReads.size() && "bad SchedRead index");
assert(SchedReads[Idx].isValid() && "invalid SchedRead");
return SchedReads[Idx];
}
const CodeGenSchedRW &getSchedRW(unsigned Idx, bool IsRead) const {
return IsRead ? getSchedRead(Idx) : getSchedWrite(Idx);
}
CodeGenSchedRW &getSchedRW(Record *Def) {
bool IsRead = Def->isSubClassOf("SchedRead");
unsigned Idx = getSchedRWIdx(Def, IsRead);
return const_cast<CodeGenSchedRW&>(
IsRead ? getSchedRead(Idx) : getSchedWrite(Idx));
}
const CodeGenSchedRW &getSchedRW(Record*Def) const {
return const_cast<CodeGenSchedModels&>(*this).getSchedRW(Def);
}
unsigned getSchedRWIdx(Record *Def, bool IsRead, unsigned After = 0) const;
// Return true if the given write record is referenced by a ReadAdvance.
bool hasReadOfWrite(Record *WriteDef) const;
// Get a SchedClass from its index.
CodeGenSchedClass &getSchedClass(unsigned Idx) {
assert(Idx < SchedClasses.size() && "bad SchedClass index");
return SchedClasses[Idx];
}
const CodeGenSchedClass &getSchedClass(unsigned Idx) const {
assert(Idx < SchedClasses.size() && "bad SchedClass index");
return SchedClasses[Idx];
}
// Get the SchedClass index for an instruction. Instructions with no
// itinerary, no SchedReadWrites, and no InstrReadWrites references return 0
// for NoItinerary.
unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const;
typedef std::vector<CodeGenSchedClass>::const_iterator SchedClassIter;
SchedClassIter schedClassBegin() const { return SchedClasses.begin(); }
SchedClassIter schedClassEnd() const { return SchedClasses.end(); }
unsigned numInstrSchedClasses() const { return NumInstrSchedClasses; }
void findRWs(const RecVec &RWDefs, IdxVec &Writes, IdxVec &Reads) const;
void findRWs(const RecVec &RWDefs, IdxVec &RWs, bool IsRead) const;
void expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, bool IsRead) const;
void expandRWSeqForProc(unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
const CodeGenProcModel &ProcModel) const;
unsigned addSchedClass(Record *ItinDef, const IdxVec &OperWrites,
const IdxVec &OperReads, const IdxVec &ProcIndices);
unsigned findOrInsertRW(ArrayRef<unsigned> Seq, bool IsRead);
unsigned findSchedClassIdx(Record *ItinClassDef,
const IdxVec &Writes,
const IdxVec &Reads) const;
Record *findProcResUnits(Record *ProcResKind,
const CodeGenProcModel &PM) const;
private:
void collectProcModels();
// Initialize a new processor model if it is unique.
void addProcModel(Record *ProcDef);
void collectSchedRW();
std::string genRWName(const IdxVec& Seq, bool IsRead);
unsigned findRWForSequence(const IdxVec &Seq, bool IsRead);
void collectSchedClasses();
std::string createSchedClassName(Record *ItinClassDef,
const IdxVec &OperWrites,
const IdxVec &OperReads);
std::string createSchedClassName(const RecVec &InstDefs);
void createInstRWClass(Record *InstRWDef);
void collectProcItins();
void collectProcItinRW();
void inferSchedClasses();
void inferFromRW(const IdxVec &OperWrites, const IdxVec &OperReads,
unsigned FromClassIdx, const IdxVec &ProcIndices);
void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx);
void inferFromInstRWs(unsigned SCIdx);
bool hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM);
void verifyProcResourceGroups(CodeGenProcModel &PM);
void collectProcResources();
void collectItinProcResources(Record *ItinClassDef);
void collectRWResources(unsigned RWIdx, bool IsRead,
const IdxVec &ProcIndices);
void collectRWResources(const IdxVec &Writes, const IdxVec &Reads,
const IdxVec &ProcIndices);
void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM);
void addWriteRes(Record *ProcWriteResDef, unsigned PIdx);
void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx);
};
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DFAPacketizerEmitter.cpp | //===- DFAPacketizerEmitter.cpp - Packetization DFA for a VLIW machine-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class parses the Schedule.td file and produces an API that can be used
// to reason about whether an instruction can be added to a packet on a VLIW
// architecture. The class internally generates a deterministic finite
// automaton (DFA) that models all possible mappings of machine instructions
// to functional units as instructions are added to a packet.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <list>
#include <map>
#include <string>
using namespace llvm;
//
// class DFAPacketizerEmitter: class that generates and prints out the DFA
// for resource tracking.
//
namespace {
class DFAPacketizerEmitter {
private:
std::string TargetName;
//
// allInsnClasses is the set of all possible resources consumed by an
// InstrStage.
//
DenseSet<unsigned> allInsnClasses;
RecordKeeper &Records;
public:
DFAPacketizerEmitter(RecordKeeper &R);
//
// collectAllInsnClasses: Populate allInsnClasses which is a set of units
// used in each stage.
//
void collectAllInsnClasses(const std::string &Name,
Record *ItinData,
unsigned &NStages,
raw_ostream &OS);
void run(raw_ostream &OS);
};
} // End anonymous namespace.
//
//
// State represents the usage of machine resources if the packet contains
// a set of instruction classes.
//
// Specifically, currentState is a set of bit-masks.
// The nth bit in a bit-mask indicates whether the nth resource is being used
// by this state. The set of bit-masks in a state represent the different
// possible outcomes of transitioning to this state.
// For example: consider a two resource architecture: resource L and resource M
// with three instruction classes: L, M, and L_or_M.
// From the initial state (currentState = 0x00), if we add instruction class
// L_or_M we will transition to a state with currentState = [0x01, 0x10]. This
// represents the possible resource states that can result from adding an
// L_or_M instruction.
//
// Another way of thinking about this transition is that we are mapping an NDFA with
// two states [0x01] and [0x10] into a DFA with a single state [0x01, 0x10].
//
// A State instance also contains a collection of transitions from that state:
// a map from inputs to new states.
//
namespace {
class State {
public:
static int currentStateNum;
// stateNum is the only member used for equality/ordering, all other members
// can be mutated even in const State objects.
const int stateNum;
mutable bool isInitial;
mutable std::set<unsigned> stateInfo;
typedef std::map<unsigned, const State *> TransitionMap;
mutable TransitionMap Transitions;
State();
bool operator<(const State &s) const {
return stateNum < s.stateNum;
}
//
// canAddInsnClass - Returns true if an instruction of type InsnClass is a
// valid transition from this state, i.e., can an instruction of type InsnClass
// be added to the packet represented by this state.
//
// PossibleStates is the set of valid resource states that ensue from valid
// transitions.
//
bool canAddInsnClass(unsigned InsnClass) const;
//
// AddInsnClass - Return all combinations of resource reservation
// which are possible from this state (PossibleStates).
//
void AddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates) const;
//
// addTransition - Add a transition from this state given the input InsnClass
//
void addTransition(unsigned InsnClass, const State *To) const;
//
// hasTransition - Returns true if there is a transition from this state
// given the input InsnClass
//
bool hasTransition(unsigned InsnClass) const;
};
} // End anonymous namespace.
//
// class DFA: deterministic finite automaton for processor resource tracking.
//
namespace {
class DFA {
public:
DFA();
// Set of states. Need to keep this sorted to emit the transition table.
typedef std::set<State> StateSet;
StateSet states;
State *currentState;
//
// Modify the DFA.
//
const State &newState();
//
// writeTable: Print out a table representing the DFA.
//
void writeTableAndAPI(raw_ostream &OS, const std::string &ClassName);
};
} // End anonymous namespace.
//
// Constructors and destructors for State and DFA
//
State::State() :
stateNum(currentStateNum++), isInitial(false) {}
DFA::DFA(): currentState(nullptr) {}
//
// addTransition - Add a transition from this state given the input InsnClass
//
void State::addTransition(unsigned InsnClass, const State *To) const {
assert(!Transitions.count(InsnClass) &&
"Cannot have multiple transitions for the same input");
Transitions[InsnClass] = To;
}
//
// hasTransition - Returns true if there is a transition from this state
// given the input InsnClass
//
bool State::hasTransition(unsigned InsnClass) const {
return Transitions.count(InsnClass) > 0;
}
//
// AddInsnClass - Return all combinations of resource reservation
// which are possible from this state (PossibleStates).
//
void State::AddInsnClass(unsigned InsnClass,
std::set<unsigned> &PossibleStates) const {
//
// Iterate over all resource states in currentState.
//
for (std::set<unsigned>::iterator SI = stateInfo.begin();
SI != stateInfo.end(); ++SI) {
unsigned thisState = *SI;
//
// Iterate over all possible resources used in InsnClass.
// For ex: for InsnClass = 0x11, all resources = {0x01, 0x10}.
//
DenseSet<unsigned> VisitedResourceStates;
for (unsigned int j = 0; j < sizeof(InsnClass) * 8; ++j) {
if ((0x1 << j) & InsnClass) {
//
// For each possible resource used in InsnClass, generate the
// resource state if that resource was used.
//
unsigned ResultingResourceState = thisState | (0x1 << j);
//
// Check if the resulting resource state can be accommodated in this
// packet.
// We compute ResultingResourceState OR thisState.
// If the result of the OR is different than thisState, it implies
// that there is at least one resource that can be used to schedule
// InsnClass in the current packet.
// Insert ResultingResourceState into PossibleStates only if we haven't
// processed ResultingResourceState before.
//
if ((ResultingResourceState != thisState) &&
(VisitedResourceStates.count(ResultingResourceState) == 0)) {
VisitedResourceStates.insert(ResultingResourceState);
PossibleStates.insert(ResultingResourceState);
}
}
}
}
}
//
// canAddInsnClass - Quickly verifies if an instruction of type InsnClass is a
// valid transition from this state i.e., can an instruction of type InsnClass
// be added to the packet represented by this state.
//
bool State::canAddInsnClass(unsigned InsnClass) const {
for (std::set<unsigned>::const_iterator SI = stateInfo.begin();
SI != stateInfo.end(); ++SI) {
if (~*SI & InsnClass)
return true;
}
return false;
}
const State &DFA::newState() {
auto IterPair = states.insert(State());
assert(IterPair.second && "State already exists");
return *IterPair.first;
}
int State::currentStateNum = 0;
DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R):
TargetName(CodeGenTarget(R).getName()),
allInsnClasses(), Records(R) {}
//
// writeTableAndAPI - Print out a table representing the DFA and the
// associated API to create a DFA packetizer.
//
// Format:
// DFAStateInputTable[][2] = pairs of <Input, Transition> for all valid
// transitions.
// DFAStateEntryTable[i] = Index of the first entry in DFAStateInputTable for
// the ith state.
//
//
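// As an illustration only (the target name Foo, the state count, and the
// transition values are made up), a two-state DFA where state 0 has
// transitions on inputs 0x1 and 0x2 and state 1 has none would be printed
// roughly as:
//
//   const int FooDFAStateInputTable[][2] = {
//     {1, 1}, {2, 1},
//     {-1, -1},
//     {-1, -1}
//   };
//   const unsigned int FooDFAStateEntryTable[] = {
//     0, 2, 3,
//   };
//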
void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
static const std::string SentinelEntry = "{-1, -1}";
DFA::StateSet::iterator SI = states.begin();
// This table provides a map to the beginning of the transitions for State s
// in DFAStateInputTable.
std::vector<int> StateEntry(states.size());
OS << "namespace llvm {\n\n";
OS << "const int " << TargetName << "DFAStateInputTable[][2] = {\n";
// Tracks the total valid transitions encountered so far. It is used
// to construct the StateEntry table.
int ValidTransitions = 0;
for (unsigned i = 0; i < states.size(); ++i, ++SI) {
assert ((SI->stateNum == (int) i) && "Mismatch in state numbers");
StateEntry[i] = ValidTransitions;
for (State::TransitionMap::iterator
II = SI->Transitions.begin(), IE = SI->Transitions.end();
II != IE; ++II) {
OS << "{" << II->first << ", "
<< II->second->stateNum
<< "}, ";
}
ValidTransitions += SI->Transitions.size();
// If there are no valid transitions from this state, we need a sentinel
// transition.
if (ValidTransitions == StateEntry[i]) {
OS << SentinelEntry << ",";
++ValidTransitions;
}
OS << "\n";
}
// Print out a sentinel entry at the end of the StateInputTable. This is
// needed to iterate over StateInputTable in DFAPacketizer::ReadTable()
OS << SentinelEntry << "\n";
OS << "};\n\n";
OS << "const unsigned int " << TargetName << "DFAStateEntryTable[] = {\n";
// Multiply i by 2 since each entry in DFAStateInputTable is a set of
// two numbers.
for (unsigned i = 0; i < states.size(); ++i)
OS << StateEntry[i] << ", ";
// Print out the index to the sentinel entry in StateInputTable
OS << ValidTransitions << ", ";
OS << "\n};\n";
OS << "} // namespace\n";
//
// Emit DFA Packetizer tables if the target is a VLIW machine.
//
std::string SubTargetClassName = TargetName + "GenSubtargetInfo";
OS << "\n" << "#include \"llvm/CodeGen/DFAPacketizer.h\"\n";
OS << "namespace llvm {\n";
OS << "DFAPacketizer *" << SubTargetClassName << "::"
<< "createDFAPacketizer(const InstrItineraryData *IID) const {\n"
<< " return new DFAPacketizer(IID, " << TargetName
<< "DFAStateInputTable, " << TargetName << "DFAStateEntryTable);\n}\n\n";
OS << "} // End llvm namespace \n";
}
//
// collectAllInsnClasses - Populate allInsnClasses which is a set of units
// used in each stage.
//
void DFAPacketizerEmitter::collectAllInsnClasses(const std::string &Name,
Record *ItinData,
unsigned &NStages,
raw_ostream &OS) {
// Collect processor itineraries.
std::vector<Record*> ProcItinList =
Records.getAllDerivedDefinitions("ProcessorItineraries");
// If there are no itineraries beyond the default, don't bother.
if (ProcItinList.size() < 2)
return;
std::map<std::string, unsigned> NameToBitsMap;
// Parse functional units for all the itineraries.
for (unsigned i = 0, N = ProcItinList.size(); i < N; ++i) {
Record *Proc = ProcItinList[i];
std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
// Convert macros to bits for each stage.
for (unsigned i = 0, N = FUs.size(); i < N; ++i)
NameToBitsMap[FUs[i]->getName()] = (unsigned) (1U << i);
}
const std::vector<Record*> &StageList =
ItinData->getValueAsListOfDefs("Stages");
// The number of stages.
NStages = StageList.size();
// For each unit.
unsigned UnitBitValue = 0;
// Compute the bitwise or of each unit used in this stage.
for (unsigned i = 0; i < NStages; ++i) {
const Record *Stage = StageList[i];
// Get unit list.
const std::vector<Record*> &UnitList =
Stage->getValueAsListOfDefs("Units");
for (unsigned j = 0, M = UnitList.size(); j < M; ++j) {
// Conduct bitwise or.
std::string UnitName = UnitList[j]->getName();
assert(NameToBitsMap.count(UnitName));
UnitBitValue |= NameToBitsMap[UnitName];
}
if (UnitBitValue != 0)
allInsnClasses.insert(UnitBitValue);
}
}
//
// Run the worklist algorithm to generate the DFA.
//
void DFAPacketizerEmitter::run(raw_ostream &OS) {
// Collect processor itineraries.
std::vector<Record*> ProcItinList =
Records.getAllDerivedDefinitions("ProcessorItineraries");
//
// Collect the instruction classes.
//
for (unsigned i = 0, N = ProcItinList.size(); i < N; i++) {
Record *Proc = ProcItinList[i];
// Get processor itinerary name.
const std::string &Name = Proc->getName();
// Skip default.
if (Name == "NoItineraries")
continue;
// Sanity check for at least one instruction itinerary class.
unsigned NItinClasses =
Records.getAllDerivedDefinitions("InstrItinClass").size();
if (NItinClasses == 0)
return;
// Get itinerary data list.
std::vector<Record*> ItinDataList = Proc->getValueAsListOfDefs("IID");
// Collect instruction classes for all itinerary data.
for (unsigned j = 0, M = ItinDataList.size(); j < M; j++) {
Record *ItinData = ItinDataList[j];
unsigned NStages;
collectAllInsnClasses(Name, ItinData, NStages, OS);
}
}
//
// Run a worklist algorithm to generate the DFA.
//
DFA D;
const State *Initial = &D.newState();
Initial->isInitial = true;
Initial->stateInfo.insert(0x0);
SmallVector<const State*, 32> WorkList;
std::map<std::set<unsigned>, const State*> Visited;
WorkList.push_back(Initial);
//
// Worklist algorithm to create a DFA for processor resource tracking.
// C = {set of InsnClasses}
// Begin with initial node in worklist. Initial node does not have
// any consumed resources,
// ResourceState = 0x0
// Visited = {}
// While worklist != empty
// S = first element of worklist
// For every instruction class C
// if we can accommodate C in S:
// S' = state with resource states = {S Union C}
// Add a new transition: S x C -> S'
// If S' is not in Visited:
// Add S' to worklist
// Add S' to Visited
//
while (!WorkList.empty()) {
const State *current = WorkList.pop_back_val();
for (DenseSet<unsigned>::iterator CI = allInsnClasses.begin(),
CE = allInsnClasses.end(); CI != CE; ++CI) {
unsigned InsnClass = *CI;
std::set<unsigned> NewStateResources;
//
// If we haven't already created a transition for this input
// and the state can accommodate this InsnClass, create a transition.
//
if (!current->hasTransition(InsnClass) &&
current->canAddInsnClass(InsnClass)) {
const State *NewState;
current->AddInsnClass(InsnClass, NewStateResources);
assert(!NewStateResources.empty() && "New states must be generated");
//
// If we have seen this state before, then do not create a new state.
//
//
auto VI = Visited.find(NewStateResources);
if (VI != Visited.end())
NewState = VI->second;
else {
NewState = &D.newState();
NewState->stateInfo = NewStateResources;
Visited[NewStateResources] = NewState;
WorkList.push_back(NewState);
}
current->addTransition(InsnClass, NewState);
}
}
}
// Print out the table.
D.writeTableAndAPI(OS, TargetName);
}
namespace llvm {
void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS) {
emitSourceFileHeader("Target DFA Packetizer Tables", OS);
DFAPacketizerEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenRegisters.h | //===- CodeGenRegisters.h - Register and RegisterClass Info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate information gleaned from the
// target register and register class definitions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_CODEGENREGISTERS_H
#define LLVM_UTILS_TABLEGEN_CODEGENREGISTERS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/SetTheory.h"
#include <cstdlib>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>
#include <deque>
namespace llvm {
class CodeGenRegBank;
/// Used to encode a step in a register lane mask transformation.
/// Mask the bits specified in Mask, then rotate them RotateLeft bits to the
/// left, assuming a wraparound at 32 bits.
struct MaskRolPair {
unsigned Mask;
uint8_t RotateLeft;
bool operator==(const MaskRolPair Other) const {
return Mask == Other.Mask && RotateLeft == Other.RotateLeft;
}
bool operator!=(const MaskRolPair Other) const {
return Mask != Other.Mask || RotateLeft != Other.RotateLeft;
}
};
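// A minimal sketch of how a single MaskRolPair step could be applied to a lane
// mask (this helper is illustrative and not part of this header):
//
//   static unsigned applyMaskRol(unsigned LaneMask, const MaskRolPair &P) {
//     unsigned Masked = LaneMask & P.Mask;
//     return (Masked << P.RotateLeft) |
//            (Masked >> ((32 - P.RotateLeft) & 31)); // rotate, wrapping at 32
//   }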
/// CodeGenSubRegIndex - Represents a sub-register index.
class CodeGenSubRegIndex {
Record *const TheDef;
std::string Name;
std::string Namespace;
public:
uint16_t Size;
uint16_t Offset;
const unsigned EnumValue;
mutable unsigned LaneMask;
mutable SmallVector<MaskRolPair,1> CompositionLaneMaskTransform;
// Are all super-registers containing this SubRegIndex covered by their
// sub-registers?
bool AllSuperRegsCovered;
CodeGenSubRegIndex(Record *R, unsigned Enum);
CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
const std::string &getName() const { return Name; }
const std::string &getNamespace() const { return Namespace; }
std::string getQualifiedName() const;
// Map of composite subreg indices.
typedef std::map<CodeGenSubRegIndex *, CodeGenSubRegIndex *,
deref<llvm::less>> CompMap;
// Returns the subreg index that results from composing this with Idx.
// Returns NULL if this and Idx don't compose.
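// For example, in a NEON-style register file where Q0 = [dsub_0, dsub_1] and
// each D register = [ssub_0, ssub_1], composing dsub_1 with ssub_0 yields
// ssub_2 (the index naming S2 inside Q0).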
CodeGenSubRegIndex *compose(CodeGenSubRegIndex *Idx) const {
CompMap::const_iterator I = Composed.find(Idx);
return I == Composed.end() ? nullptr : I->second;
}
// Add a composite subreg index: this+A = B.
// Return a conflicting composite, or NULL
CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B) {
assert(A && B);
std::pair<CompMap::iterator, bool> Ins =
Composed.insert(std::make_pair(A, B));
// Synthetic subreg indices that aren't contiguous (for instance ARM
// register tuples) don't have a bit range, so it's OK to let
// B->Offset == -1. For the other cases, accumulate the offset and set
// the size here. Only do so if there is no offset yet though.
if ((Offset != (uint16_t)-1 && A->Offset != (uint16_t)-1) &&
(B->Offset == (uint16_t)-1)) {
B->Offset = Offset + A->Offset;
B->Size = A->Size;
}
return (Ins.second || Ins.first->second == B) ? nullptr
: Ins.first->second;
}
// Update the composite maps of components specified in 'ComposedOf'.
void updateComponents(CodeGenRegBank&);
// Return the map of composites.
const CompMap &getComposites() const { return Composed; }
// Compute LaneMask from Composed. Return LaneMask.
unsigned computeLaneMask() const;
private:
CompMap Composed;
};
inline bool operator<(const CodeGenSubRegIndex &A,
const CodeGenSubRegIndex &B) {
return A.EnumValue < B.EnumValue;
}
/// CodeGenRegister - Represents a register definition.
struct CodeGenRegister {
Record *TheDef;
unsigned EnumValue;
unsigned CostPerUse;
bool CoveredBySubRegs;
bool HasDisjunctSubRegs;
// Map SubRegIndex -> Register.
typedef std::map<CodeGenSubRegIndex *, CodeGenRegister *, deref<llvm::less>>
SubRegMap;
CodeGenRegister(Record *R, unsigned Enum);
const std::string &getName() const;
// Extract more information from TheDef. This is used to build an object
// graph after all CodeGenRegister objects have been created.
void buildObjectGraph(CodeGenRegBank&);
// Lazily compute a map of all sub-registers.
// This includes unique entries for all sub-sub-registers.
const SubRegMap &computeSubRegs(CodeGenRegBank&);
// Compute extra sub-registers by combining the existing sub-registers.
void computeSecondarySubRegs(CodeGenRegBank&);
// Add this as a super-register to all sub-registers after the sub-register
// graph has been built.
void computeSuperRegs(CodeGenRegBank&);
const SubRegMap &getSubRegs() const {
assert(SubRegsComplete && "Must precompute sub-registers");
return SubRegs;
}
// Add sub-registers to OSet following a pre-order defined by the .td file.
void addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
CodeGenRegBank&) const;
// Return the sub-register index naming Reg as a sub-register of this
// register. Returns NULL if Reg is not a sub-register.
CodeGenSubRegIndex *getSubRegIndex(const CodeGenRegister *Reg) const {
return SubReg2Idx.lookup(Reg);
}
typedef std::vector<const CodeGenRegister*> SuperRegList;
// Get the list of super-registers in topological order, small to large.
// This is valid after computeSubRegs visits all registers during RegBank
// construction.
const SuperRegList &getSuperRegs() const {
assert(SubRegsComplete && "Must precompute sub-registers");
return SuperRegs;
}
// Get the list of ad hoc aliases. The graph is symmetric, so the list
// contains all registers in 'Aliases', and all registers that mention this
// register in 'Aliases'.
ArrayRef<CodeGenRegister*> getExplicitAliases() const {
return ExplicitAliases;
}
// Get the topological signature of this register. This is a small integer
// less than RegBank.getNumTopoSigs(). Registers with the same TopoSig have
// identical sub-register structure. That is, they support the same set of
// sub-register indices mapping to the same kind of sub-registers
// (TopoSig-wise).
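// For example, on X86 the 32-bit registers with both low- and high-byte
// sub-registers (EAX, EBX, ECX, EDX) share one TopoSig, while registers such
// as ESI, whose byte sub-register structure differs, get a different one.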
unsigned getTopoSig() const {
assert(SuperRegsComplete && "TopoSigs haven't been computed yet.");
return TopoSig;
}
// List of register units in ascending order.
typedef SparseBitVector<> RegUnitList;
typedef SmallVector<unsigned, 16> RegUnitLaneMaskList;
// How many entries in RegUnitList are native?
RegUnitList NativeRegUnits;
// Get the list of register units.
// This is only valid after computeSubRegs() completes.
const RegUnitList &getRegUnits() const { return RegUnits; }
ArrayRef<unsigned> getRegUnitLaneMasks() const {
return makeArrayRef(RegUnitLaneMasks).slice(0, NativeRegUnits.count());
}
// Get the native register units. This is a prefix of getRegUnits().
RegUnitList getNativeRegUnits() const {
return NativeRegUnits;
}
void setRegUnitLaneMasks(const RegUnitLaneMaskList &LaneMasks) {
RegUnitLaneMasks = LaneMasks;
}
// Inherit register units from subregisters.
// Return true if the RegUnits changed.
bool inheritRegUnits(CodeGenRegBank &RegBank);
// Adopt a register unit for pressure tracking.
// A unit is adopted iff its unit number is >= NativeRegUnits.count().
void adoptRegUnit(unsigned RUID) { RegUnits.set(RUID); }
// Get the sum of this register's register unit weights.
unsigned getWeight(const CodeGenRegBank &RegBank) const;
// Canonically ordered set.
typedef std::vector<const CodeGenRegister*> Vec;
private:
bool SubRegsComplete;
bool SuperRegsComplete;
unsigned TopoSig;
// The sub-registers explicit in the .td file form a tree.
SmallVector<CodeGenSubRegIndex*, 8> ExplicitSubRegIndices;
SmallVector<CodeGenRegister*, 8> ExplicitSubRegs;
// Explicit ad hoc aliases, symmetrized to form an undirected graph.
SmallVector<CodeGenRegister*, 8> ExplicitAliases;
// Super-registers where this is the first explicit sub-register.
SuperRegList LeadingSuperRegs;
SubRegMap SubRegs;
SuperRegList SuperRegs;
DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*> SubReg2Idx;
RegUnitList RegUnits;
RegUnitLaneMaskList RegUnitLaneMasks;
};
inline bool operator<(const CodeGenRegister &A, const CodeGenRegister &B) {
return A.EnumValue < B.EnumValue;
}
inline bool operator==(const CodeGenRegister &A, const CodeGenRegister &B) {
return A.EnumValue == B.EnumValue;
}
class CodeGenRegisterClass {
CodeGenRegister::Vec Members;
// Allocation orders. Order[0] always contains all registers in Members.
std::vector<SmallVector<Record*, 16> > Orders;
// Bit mask of sub-classes including this, indexed by their EnumValue.
BitVector SubClasses;
// List of super-classes, topologically ordered to have the larger classes
// first. This is the same as sorting by EnumValue.
SmallVector<CodeGenRegisterClass*, 4> SuperClasses;
Record *TheDef;
std::string Name;
// For a synthesized class, inherit missing properties from the nearest
// super-class.
void inheritProperties(CodeGenRegBank&);
// Map SubRegIndex -> sub-class. This is the largest sub-class where all
// registers have a SubRegIndex sub-register.
DenseMap<const CodeGenSubRegIndex *, CodeGenRegisterClass *>
SubClassWithSubReg;
// Map SubRegIndex -> set of super-reg classes. This is all register
// classes SuperRC such that:
//
// R:SubRegIndex in this RC for all R in SuperRC.
//
DenseMap<const CodeGenSubRegIndex *, SmallPtrSet<CodeGenRegisterClass *, 8>>
SuperRegClasses;
// Bit vector of TopoSigs for the registers in this class. This will be
// very sparse on regular architectures.
BitVector TopoSigs;
public:
unsigned EnumValue;
std::string Namespace;
SmallVector<MVT::SimpleValueType, 4> VTs;
unsigned SpillSize;
unsigned SpillAlignment;
int CopyCost;
bool Allocatable;
std::string AltOrderSelect;
uint8_t AllocationPriority;
/// Contains the combination of the lane masks of all subregisters.
unsigned LaneMask;
/// True if there are at least 2 subregisters which do not interfere.
bool HasDisjunctSubRegs;
// Return the Record that defined this class, or NULL if the class was
// created by TableGen.
Record *getDef() const { return TheDef; }
const std::string &getName() const { return Name; }
std::string getQualifiedName() const;
ArrayRef<MVT::SimpleValueType> getValueTypes() const {return VTs;}
unsigned getNumValueTypes() const { return VTs.size(); }
MVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
if (VTNum < VTs.size())
return VTs[VTNum];
llvm_unreachable("VTNum greater than number of ValueTypes in RegClass!");
}
// Return true if this class contains the register.
bool contains(const CodeGenRegister*) const;
// Returns true if RC is a subclass.
// RC is a sub-class of this class if it is a valid replacement for any
// instruction operand where a register of this class is required. It must
// satisfy these conditions:
//
// 1. All RC registers are also in this.
// 2. The RC spill size must not be smaller than our spill size.
// 3. RC spill alignment must be compatible with ours.
//
bool hasSubClass(const CodeGenRegisterClass *RC) const {
return SubClasses.test(RC->EnumValue);
}
// getSubClassWithSubReg - Returns the largest sub-class where all
// registers have a SubIdx sub-register.
CodeGenRegisterClass *
getSubClassWithSubReg(const CodeGenSubRegIndex *SubIdx) const {
return SubClassWithSubReg.lookup(SubIdx);
}
void setSubClassWithSubReg(const CodeGenSubRegIndex *SubIdx,
CodeGenRegisterClass *SubRC) {
SubClassWithSubReg[SubIdx] = SubRC;
}
// getSuperRegClasses - Returns a bit vector of all register classes
// containing only SubIdx super-registers of this class.
void getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
BitVector &Out) const;
// addSuperRegClass - Add a class containing only SubIdx super-registers.
void addSuperRegClass(CodeGenSubRegIndex *SubIdx,
CodeGenRegisterClass *SuperRC) {
SuperRegClasses[SubIdx].insert(SuperRC);
}
// getSubClasses - Returns a constant BitVector of subclasses indexed by
// EnumValue.
// The SubClasses vector includes an entry for this class.
const BitVector &getSubClasses() const { return SubClasses; }
// getSuperClasses - Returns a list of super classes ordered by EnumValue.
// The array does not include an entry for this class.
ArrayRef<CodeGenRegisterClass*> getSuperClasses() const {
return SuperClasses;
}
// Returns an ordered list of class members.
// The order of registers is the same as in the .td file.
// No = 0 is the default allocation order, No = 1 is the first alternative.
ArrayRef<Record*> getOrder(unsigned No = 0) const {
return Orders[No];
}
// Return the total number of allocation orders available.
unsigned getNumOrders() const { return Orders.size(); }
// Get the set of registers. This set contains the same registers as
// getOrder(0).
const CodeGenRegister::Vec &getMembers() const { return Members; }
// Get a bit vector of TopoSigs present in this register class.
const BitVector &getTopoSigs() const { return TopoSigs; }
// Populate a unique sorted list of units from a register set.
void buildRegUnitSet(std::vector<unsigned> &RegUnits) const;
CodeGenRegisterClass(CodeGenRegBank&, Record *R);
// A key representing the parts of a register class used for forming
// sub-classes. Note the ordering provided by this key is not the same as
// the topological order used for the EnumValues.
struct Key {
const CodeGenRegister::Vec *Members;
unsigned SpillSize;
unsigned SpillAlignment;
Key(const CodeGenRegister::Vec *M, unsigned S = 0, unsigned A = 0)
: Members(M), SpillSize(S), SpillAlignment(A) {}
Key(const CodeGenRegisterClass &RC)
: Members(&RC.getMembers()),
SpillSize(RC.SpillSize),
SpillAlignment(RC.SpillAlignment) {}
// Lexicographical order of (Members, SpillSize, SpillAlignment).
bool operator<(const Key&) const;
};
// Create a non-user defined register class.
CodeGenRegisterClass(CodeGenRegBank&, StringRef Name, Key Props);
// Called by CodeGenRegBank::CodeGenRegBank().
static void computeSubClasses(CodeGenRegBank&);
};
// Register units are used to model interference and register pressure.
// Every register is assigned one or more register units such that two
// registers overlap if and only if they have a register unit in common.
//
// Normally, one register unit is created per leaf register. Non-leaf
// registers inherit the units of their sub-registers.
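//
// For example (illustrative): a 32-bit register pair built from two 16-bit
// halves gets no unit of its own; each half gets one unit and the pair
// inherits both, so the pair interferes with either half while the two
// halves do not interfere with each other.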
struct RegUnit {
// Weight assigned to this RegUnit for estimating register pressure.
// This is useful when equalizing weights in register classes with mixed
// register topologies.
unsigned Weight;
// Each native RegUnit corresponds to one or two root registers. The full
// set of registers containing this unit can be computed as the union of
// these two registers and their super-registers.
const CodeGenRegister *Roots[2];
// Index into RegClassUnitSets where we can find the list of UnitSets that
// contain this unit.
unsigned RegClassUnitSetsIdx;
RegUnit() : Weight(0), RegClassUnitSetsIdx(0) {
Roots[0] = Roots[1] = nullptr;
}
ArrayRef<const CodeGenRegister*> getRoots() const {
assert(!(Roots[1] && !Roots[0]) && "Invalid roots array");
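// !!Roots[0] + !!Roots[1] counts the non-null roots (0, 1, or 2).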
return makeArrayRef(Roots, !!Roots[0] + !!Roots[1]);
}
};
// Each RegUnitSet is a sorted vector with a name.
struct RegUnitSet {
typedef std::vector<unsigned>::const_iterator iterator;
std::string Name;
std::vector<unsigned> Units;
unsigned Weight; // Cache the sum of all unit weights.
unsigned Order; // Cache the sort key.
RegUnitSet() : Weight(0), Order(0) {}
};
// Base vector for identifying TopoSigs. The contents uniquely identify a
// TopoSig; only computeSuperRegs needs to know how.
typedef SmallVector<unsigned, 16> TopoSigId;
// CodeGenRegBank - Represent a target's registers and the relations between
// them.
class CodeGenRegBank {
SetTheory Sets;
std::deque<CodeGenSubRegIndex> SubRegIndices;
DenseMap<Record*, CodeGenSubRegIndex*> Def2SubRegIdx;
CodeGenSubRegIndex *createSubRegIndex(StringRef Name, StringRef NameSpace);
typedef std::map<SmallVector<CodeGenSubRegIndex*, 8>,
CodeGenSubRegIndex*> ConcatIdxMap;
ConcatIdxMap ConcatIdx;
// Registers.
std::deque<CodeGenRegister> Registers;
StringMap<CodeGenRegister*> RegistersByName;
DenseMap<Record*, CodeGenRegister*> Def2Reg;
unsigned NumNativeRegUnits;
std::map<TopoSigId, unsigned> TopoSigs;
// Includes native (0..NumNativeRegUnits-1) and adopted register units.
SmallVector<RegUnit, 8> RegUnits;
// Register classes.
std::list<CodeGenRegisterClass> RegClasses;
DenseMap<Record*, CodeGenRegisterClass*> Def2RC;
typedef std::map<CodeGenRegisterClass::Key, CodeGenRegisterClass*> RCKeyMap;
RCKeyMap Key2RC;
// Remember each unique set of register units. Initially, this contains a
// unique set for each register class. Similar sets are coalesced with
// pruneUnitSets and new supersets are inferred during computeRegUnitSets.
std::vector<RegUnitSet> RegUnitSets;
// Map RegisterClass index to the index of the RegUnitSet that contains the
// class's units and any inferred RegUnit supersets.
//
// NOTE: This could grow beyond the number of register classes when we map
// register units to lists of unit sets. If the list of unit sets does not
// already exist for a register class, we create a new entry in this vector.
std::vector<std::vector<unsigned> > RegClassUnitSets;
// Give each register unit set an order based on sorting criteria.
std::vector<unsigned> RegUnitSetOrder;
// Add RC to *2RC maps.
void addToMaps(CodeGenRegisterClass*);
// Create a synthetic sub-class if it is missing.
CodeGenRegisterClass *getOrCreateSubClass(const CodeGenRegisterClass *RC,
const CodeGenRegister::Vec *Membs,
StringRef Name);
// Infer missing register classes.
void computeInferredRegisterClasses();
void inferCommonSubClass(CodeGenRegisterClass *RC);
void inferSubClassWithSubReg(CodeGenRegisterClass *RC);
void inferMatchingSuperRegClass(CodeGenRegisterClass *RC) {
inferMatchingSuperRegClass(RC, RegClasses.begin());
}
void inferMatchingSuperRegClass(
CodeGenRegisterClass *RC,
std::list<CodeGenRegisterClass>::iterator FirstSubRegRC);
// Iteratively prune unit sets.
void pruneUnitSets();
// Compute a weight for each register unit created during getSubRegs.
void computeRegUnitWeights();
// Create a RegUnitSet for each RegClass and infer superclasses.
void computeRegUnitSets();
// Populate the Composite map from sub-register relationships.
void computeComposites();
// Compute a lane mask for each sub-register index.
void computeSubRegLaneMasks();
/// Computes a lane mask for each register unit enumerated by a physical
/// register.
void computeRegUnitLaneMasks();
public:
CodeGenRegBank(RecordKeeper&);
SetTheory &getSets() { return Sets; }
// Sub-register indices. The first NumNamedIndices are defined by the user
// in the .td files. The rest are synthesized such that all sub-registers
// have a unique name.
const std::deque<CodeGenSubRegIndex> &getSubRegIndices() const {
return SubRegIndices;
}
// Find a SubRegIndex from its Record def.
CodeGenSubRegIndex *getSubRegIdx(Record*);
// Find or create a sub-register index representing the A+B composition.
CodeGenSubRegIndex *getCompositeSubRegIndex(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B);
// Find or create a sub-register index representing the concatenation of
// non-overlapping sibling indices.
CodeGenSubRegIndex *
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8>&);
void
addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts,
CodeGenSubRegIndex *Idx) {
ConcatIdx.insert(std::make_pair(Parts, Idx));
}
const std::deque<CodeGenRegister> &getRegisters() { return Registers; }
const StringMap<CodeGenRegister*> &getRegistersByName() {
return RegistersByName;
}
// Find a register from its Record def.
CodeGenRegister *getReg(Record*);
// Get a Register's index into the Registers array.
unsigned getRegIndex(const CodeGenRegister *Reg) const {
return Reg->EnumValue - 1;
}
// Return the number of allocated TopoSigs. The first TopoSig representing
// leaf registers is allocated number 0.
unsigned getNumTopoSigs() const {
return TopoSigs.size();
}
// Find or create a TopoSig for the given TopoSigId.
// This function is only for use by CodeGenRegister::computeSuperRegs().
// Others should simply use Reg->getTopoSig().
unsigned getTopoSig(const TopoSigId &Id) {
return TopoSigs.insert(std::make_pair(Id, TopoSigs.size())).first->second;
}
// Create a native register unit that is associated with one or two root
// registers.
unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = nullptr) {
RegUnits.resize(RegUnits.size() + 1);
RegUnits.back().Roots[0] = R0;
RegUnits.back().Roots[1] = R1;
return RegUnits.size() - 1;
}
// Create a new non-native register unit that can be adopted by a register
// to increase its pressure. Note that NumNativeRegUnits is not increased.
unsigned newRegUnit(unsigned Weight) {
RegUnits.resize(RegUnits.size() + 1);
RegUnits.back().Weight = Weight;
return RegUnits.size() - 1;
}
// Native units are the singular unit of a leaf register. Register aliasing
// is completely characterized by native units. Adopted units exist to give
// registers additional weight but don't affect aliasing.
bool isNativeUnit(unsigned RUID) {
return RUID < NumNativeRegUnits;
}
unsigned getNumNativeRegUnits() const {
return NumNativeRegUnits;
}
RegUnit &getRegUnit(unsigned RUID) { return RegUnits[RUID]; }
const RegUnit &getRegUnit(unsigned RUID) const { return RegUnits[RUID]; }
std::list<CodeGenRegisterClass> &getRegClasses() { return RegClasses; }
const std::list<CodeGenRegisterClass> &getRegClasses() const {
return RegClasses;
}
// Find a register class from its def.
CodeGenRegisterClass *getRegClass(Record*);
/// getRegisterClassForRegister - Find the register class that contains the
/// specified physical register. If the register is not in a register
/// class, return null. If the register is in multiple classes, and the
/// classes have a superset-subset relationship and the same set of types,
/// return the superclass. Otherwise return null.
const CodeGenRegisterClass* getRegClassForRegister(Record *R);
// Get the sum of unit weights.
unsigned getRegUnitSetWeight(const std::vector<unsigned> &Units) const {
unsigned Weight = 0;
for (std::vector<unsigned>::const_iterator
I = Units.begin(), E = Units.end(); I != E; ++I)
Weight += getRegUnit(*I).Weight;
return Weight;
}
unsigned getRegSetIDAt(unsigned Order) const {
return RegUnitSetOrder[Order];
}
const RegUnitSet &getRegSetAt(unsigned Order) const {
return RegUnitSets[RegUnitSetOrder[Order]];
}
// Increase a RegUnitWeight.
void increaseRegUnitWeight(unsigned RUID, unsigned Inc) {
getRegUnit(RUID).Weight += Inc;
}
// Get the number of register pressure dimensions.
unsigned getNumRegPressureSets() const { return RegUnitSets.size(); }
// Get a set of register unit IDs for a given dimension of pressure.
const RegUnitSet &getRegPressureSet(unsigned Idx) const {
return RegUnitSets[Idx];
}
// The number of pressure set lists may be larger than the number of
// register classes if some register units appeared in a list of sets that
// did not correspond to an existing register class.
unsigned getNumRegClassPressureSetLists() const {
return RegClassUnitSets.size();
}
// Get a list of pressure set IDs for a register class. Liveness of a
// register in this class impacts each pressure set in this list by the
// weight of the register. An exact solution requires all registers in a
// class to have the same weight, but that is not strictly guaranteed.
ArrayRef<unsigned> getRCPressureSetIDs(unsigned RCIdx) const {
return RegClassUnitSets[RCIdx];
}
// Compute derived records such as missing sub-register indices.
void computeDerivedInfo();
// Compute the set of registers completely covered by the registers in Regs.
// The returned BitVector will have a bit set for each register in Regs,
// all sub-registers, and all super-registers that are covered by the
// registers in Regs.
//
// This is used to compute the mask of call-preserved registers from a list
// of callee-saves.
BitVector computeCoveredRegisters(ArrayRef<Record*> Regs);
// Bit mask of lanes that cover their registers. A sub-register index whose
// LaneMask is contained in CoveringLanes will be completely covered by
// another sub-register with the same or larger lane mask.
unsigned CoveringLanes;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DAGISelMatcherEmitter.cpp | //===- DAGISelMatcherEmitter.cpp - Matcher Emitter ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to generate C++ code for a matcher.
//
//===----------------------------------------------------------------------===//
#include "DAGISelMatcher.h"
#include "CodeGenDAGPatterns.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
enum {
CommentIndent = 30
};
// To reduce generated source code size.
static cl::opt<bool>
OmitComments("omit-comments", cl::desc("Do not generate comments"),
cl::init(false));
namespace {
class MatcherTableEmitter {
const CodeGenDAGPatterns &CGP;
DenseMap<TreePattern *, unsigned> NodePredicateMap;
std::vector<TreePredicateFn> NodePredicates;
StringMap<unsigned> PatternPredicateMap;
std::vector<std::string> PatternPredicates;
DenseMap<const ComplexPattern*, unsigned> ComplexPatternMap;
std::vector<const ComplexPattern*> ComplexPatterns;
DenseMap<Record*, unsigned> NodeXFormMap;
std::vector<Record*> NodeXForms;
public:
MatcherTableEmitter(const CodeGenDAGPatterns &cgp)
: CGP(cgp) {}
unsigned EmitMatcherList(const Matcher *N, unsigned Indent,
unsigned StartIdx, formatted_raw_ostream &OS);
void EmitPredicateFunctions(formatted_raw_ostream &OS);
void EmitHistogram(const Matcher *N, formatted_raw_ostream &OS);
private:
unsigned EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
formatted_raw_ostream &OS);
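// The four ID accessors below share one uniquing idiom: the map stores
// index+1 so that a default-constructed entry of 0 means "not seen yet",
// and the value returned to callers is Entry-1.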
unsigned getNodePredicate(TreePredicateFn Pred) {
unsigned &Entry = NodePredicateMap[Pred.getOrigPatFragRecord()];
if (Entry == 0) {
NodePredicates.push_back(Pred);
Entry = NodePredicates.size();
}
return Entry-1;
}
unsigned getPatternPredicate(StringRef PredName) {
unsigned &Entry = PatternPredicateMap[PredName];
if (Entry == 0) {
PatternPredicates.push_back(PredName.str());
Entry = PatternPredicates.size();
}
return Entry-1;
}
unsigned getComplexPat(const ComplexPattern &P) {
unsigned &Entry = ComplexPatternMap[&P];
if (Entry == 0) {
ComplexPatterns.push_back(&P);
Entry = ComplexPatterns.size();
}
return Entry-1;
}
unsigned getNodeXFormID(Record *Rec) {
unsigned &Entry = NodeXFormMap[Rec];
if (Entry == 0) {
NodeXForms.push_back(Rec);
Entry = NodeXForms.size();
}
return Entry-1;
}
};
} // end anonymous namespace.
static unsigned GetVBRSize(unsigned Val) {
if (Val <= 127) return 1;
unsigned NumBytes = 0;
while (Val >= 128) {
Val >>= 7;
++NumBytes;
}
return NumBytes+1;
}
/// EmitVBRValue - Emit the specified value as a VBR, returning the number of
/// bytes emitted.
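/// For example (illustrative): the value 1000 is emitted as the two bytes
/// "104|128, 7", since 104 + (7 << 7) == 1000.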
static uint64_t EmitVBRValue(uint64_t Val, raw_ostream &OS) {
if (Val <= 127) {
OS << Val << ", ";
return 1;
}
uint64_t InVal = Val;
unsigned NumBytes = 0;
while (Val >= 128) {
OS << (Val&127) << "|128,";
Val >>= 7;
++NumBytes;
}
OS << Val;
if (!OmitComments)
OS << "/*" << InVal << "*/";
OS << ", ";
return NumBytes+1;
}
/// EmitMatcher - Emit bytes for the specified matcher and return
/// the number of bytes emitted.
unsigned MatcherTableEmitter::
EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
formatted_raw_ostream &OS) {
OS.PadToColumn(Indent*2);
switch (N->getKind()) {
case Matcher::Scope: {
const ScopeMatcher *SM = cast<ScopeMatcher>(N);
assert(SM->getNext() == nullptr && "Shouldn't have next after scope");
unsigned StartIdx = CurrentIdx;
// Emit all of the children.
for (unsigned i = 0, e = SM->getNumChildren(); i != e; ++i) {
if (i == 0) {
OS << "OPC_Scope, ";
++CurrentIdx;
} else {
if (!OmitComments) {
OS << "/*" << CurrentIdx << "*/";
OS.PadToColumn(Indent*2) << "/*Scope*/ ";
} else
OS.PadToColumn(Indent*2);
}
// We need to encode the child and the offset of the failure code before
// emitting either of them. Handle this by buffering the output into a
// string while we get the size. Unfortunately, the offset of the
// children depends on the VBR size of the child, so for large children we
// have to iterate a bit.
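// For example (illustrative): the first pass assumes a 1-byte offset; if
// the child then measures 200 bytes, its offset needs a 2-byte VBR, so the
// child is re-emitted with the shifted start index until the guessed VBR
// size matches the actual one.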
SmallString<128> TmpBuf;
unsigned ChildSize = 0;
unsigned VBRSize = 0;
do {
VBRSize = GetVBRSize(ChildSize);
TmpBuf.clear();
raw_svector_ostream OS(TmpBuf);
formatted_raw_ostream FOS(OS);
ChildSize = EmitMatcherList(SM->getChild(i), Indent+1,
CurrentIdx+VBRSize, FOS);
} while (GetVBRSize(ChildSize) != VBRSize);
assert(ChildSize != 0 && "Should not have a zero-sized child!");
CurrentIdx += EmitVBRValue(ChildSize, OS);
if (!OmitComments) {
OS << "/*->" << CurrentIdx+ChildSize << "*/";
if (i == 0)
OS.PadToColumn(CommentIndent) << "// " << SM->getNumChildren()
<< " children in Scope";
}
OS << '\n' << TmpBuf;
CurrentIdx += ChildSize;
}
// Emit a zero as a sentinel indicating end of 'Scope'.
if (!OmitComments)
OS << "/*" << CurrentIdx << "*/";
OS.PadToColumn(Indent*2) << "0, ";
if (!OmitComments)
OS << "/*End of Scope*/";
OS << '\n';
return CurrentIdx - StartIdx + 1;
}
case Matcher::RecordNode:
OS << "OPC_RecordNode,";
if (!OmitComments)
OS.PadToColumn(CommentIndent) << "// #"
<< cast<RecordMatcher>(N)->getResultNo() << " = "
<< cast<RecordMatcher>(N)->getWhatFor();
OS << '\n';
return 1;
case Matcher::RecordChild:
OS << "OPC_RecordChild" << cast<RecordChildMatcher>(N)->getChildNo()
<< ',';
if (!OmitComments)
OS.PadToColumn(CommentIndent) << "// #"
<< cast<RecordChildMatcher>(N)->getResultNo() << " = "
<< cast<RecordChildMatcher>(N)->getWhatFor();
OS << '\n';
return 1;
case Matcher::RecordMemRef:
OS << "OPC_RecordMemRef,\n";
return 1;
case Matcher::CaptureGlueInput:
OS << "OPC_CaptureGlueInput,\n";
return 1;
case Matcher::MoveChild:
OS << "OPC_MoveChild, " << cast<MoveChildMatcher>(N)->getChildNo() << ",\n";
return 2;
case Matcher::MoveParent:
OS << "OPC_MoveParent,\n";
return 1;
case Matcher::CheckSame:
OS << "OPC_CheckSame, "
<< cast<CheckSameMatcher>(N)->getMatchNumber() << ",\n";
return 2;
case Matcher::CheckChildSame:
OS << "OPC_CheckChild"
<< cast<CheckChildSameMatcher>(N)->getChildNo() << "Same, "
<< cast<CheckChildSameMatcher>(N)->getMatchNumber() << ",\n";
return 2;
case Matcher::CheckPatternPredicate: {
StringRef Pred =cast<CheckPatternPredicateMatcher>(N)->getPredicate();
OS << "OPC_CheckPatternPredicate, " << getPatternPredicate(Pred) << ',';
if (!OmitComments)
OS.PadToColumn(CommentIndent) << "// " << Pred;
OS << '\n';
return 2;
}
case Matcher::CheckPredicate: {
TreePredicateFn Pred = cast<CheckPredicateMatcher>(N)->getPredicate();
OS << "OPC_CheckPredicate, " << getNodePredicate(Pred) << ',';
if (!OmitComments)
OS.PadToColumn(CommentIndent) << "// " << Pred.getFnName();
OS << '\n';
return 2;
}
case Matcher::CheckOpcode:
OS << "OPC_CheckOpcode, TARGET_VAL("
<< cast<CheckOpcodeMatcher>(N)->getOpcode().getEnumName() << "),\n";
return 3;
case Matcher::SwitchOpcode:
case Matcher::SwitchType: {
unsigned StartIdx = CurrentIdx;
unsigned NumCases;
if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
OS << "OPC_SwitchOpcode ";
NumCases = SOM->getNumCases();
} else {
OS << "OPC_SwitchType ";
NumCases = cast<SwitchTypeMatcher>(N)->getNumCases();
}
if (!OmitComments)
OS << "/*" << NumCases << " cases */";
OS << ", ";
++CurrentIdx;
// For each case we emit the size, then the opcode, then the matcher.
for (unsigned i = 0, e = NumCases; i != e; ++i) {
const Matcher *Child;
unsigned IdxSize;
if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
Child = SOM->getCaseMatcher(i);
IdxSize = 2; // size of opcode in table is 2 bytes.
} else {
Child = cast<SwitchTypeMatcher>(N)->getCaseMatcher(i);
IdxSize = 1; // size of type in table is 1 byte.
}
// We need to encode the opcode and the offset of the case code before
// emitting the case code. Handle this by buffering the output into a
// string while we get the size. Unfortunately, the offset of the
// children depends on the VBR size of the child, so for large children we
// have to iterate a bit.
SmallString<128> TmpBuf;
unsigned ChildSize = 0;
unsigned VBRSize = 0;
do {
VBRSize = GetVBRSize(ChildSize);
TmpBuf.clear();
raw_svector_ostream OS(TmpBuf);
formatted_raw_ostream FOS(OS);
ChildSize = EmitMatcherList(Child, Indent+1, CurrentIdx+VBRSize+IdxSize,
FOS);
} while (GetVBRSize(ChildSize) != VBRSize);
assert(ChildSize != 0 && "Should not have a zero-sized child!");
if (i != 0) {
if (!OmitComments)
OS << "/*" << CurrentIdx << "*/";
OS.PadToColumn(Indent*2);
if (!OmitComments)
OS << (isa<SwitchOpcodeMatcher>(N) ?
"/*SwitchOpcode*/ " : "/*SwitchType*/ ");
}
// Emit the VBR.
CurrentIdx += EmitVBRValue(ChildSize, OS);
if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N))
OS << "TARGET_VAL(" << SOM->getCaseOpcode(i).getEnumName() << "),";
else
OS << getEnumName(cast<SwitchTypeMatcher>(N)->getCaseType(i)) << ',';
CurrentIdx += IdxSize;
if (!OmitComments)
OS << "// ->" << CurrentIdx+ChildSize;
OS << '\n';
OS << TmpBuf;
CurrentIdx += ChildSize;
}
// Emit the final zero to terminate the switch.
if (!OmitComments)
OS << "/*" << CurrentIdx << "*/";
OS.PadToColumn(Indent*2) << "0, ";
if (!OmitComments)
OS << (isa<SwitchOpcodeMatcher>(N) ?
"// EndSwitchOpcode" : "// EndSwitchType");
OS << '\n';
++CurrentIdx;
return CurrentIdx-StartIdx;
}
case Matcher::CheckType:
assert(cast<CheckTypeMatcher>(N)->getResNo() == 0 &&
"FIXME: Add support for CheckType of resno != 0");
OS << "OPC_CheckType, "
<< getEnumName(cast<CheckTypeMatcher>(N)->getType()) << ",\n";
return 2;
case Matcher::CheckChildType:
OS << "OPC_CheckChild"
<< cast<CheckChildTypeMatcher>(N)->getChildNo() << "Type, "
<< getEnumName(cast<CheckChildTypeMatcher>(N)->getType()) << ",\n";
return 2;
case Matcher::CheckInteger: {
OS << "OPC_CheckInteger, ";
unsigned Bytes=1+EmitVBRValue(cast<CheckIntegerMatcher>(N)->getValue(), OS);
OS << '\n';
return Bytes;
}
case Matcher::CheckChildInteger: {
OS << "OPC_CheckChild" << cast<CheckChildIntegerMatcher>(N)->getChildNo()
<< "Integer, ";
unsigned Bytes=1+EmitVBRValue(cast<CheckChildIntegerMatcher>(N)->getValue(),
OS);
OS << '\n';
return Bytes;
}
case Matcher::CheckCondCode:
OS << "OPC_CheckCondCode, ISD::"
<< cast<CheckCondCodeMatcher>(N)->getCondCodeName() << ",\n";
return 2;
case Matcher::CheckValueType:
OS << "OPC_CheckValueType, MVT::"
<< cast<CheckValueTypeMatcher>(N)->getTypeName() << ",\n";
return 2;
case Matcher::CheckComplexPat: {
const CheckComplexPatMatcher *CCPM = cast<CheckComplexPatMatcher>(N);
const ComplexPattern &Pattern = CCPM->getPattern();
OS << "OPC_CheckComplexPat, /*CP*/" << getComplexPat(Pattern) << ", /*#*/"
<< CCPM->getMatchNumber() << ',';
if (!OmitComments) {
OS.PadToColumn(CommentIndent) << "// " << Pattern.getSelectFunc();
OS << ":$" << CCPM->getName();
for (unsigned i = 0, e = Pattern.getNumOperands(); i != e; ++i)
OS << " #" << CCPM->getFirstResult()+i;
if (Pattern.hasProperty(SDNPHasChain))
OS << " + chain result";
}
OS << '\n';
return 3;
}
case Matcher::CheckAndImm: {
OS << "OPC_CheckAndImm, ";
unsigned Bytes=1+EmitVBRValue(cast<CheckAndImmMatcher>(N)->getValue(), OS);
OS << '\n';
return Bytes;
}
case Matcher::CheckOrImm: {
OS << "OPC_CheckOrImm, ";
unsigned Bytes = 1+EmitVBRValue(cast<CheckOrImmMatcher>(N)->getValue(), OS);
OS << '\n';
return Bytes;
}
case Matcher::CheckFoldableChainNode:
OS << "OPC_CheckFoldableChainNode,\n";
return 1;
case Matcher::EmitInteger: {
int64_t Val = cast<EmitIntegerMatcher>(N)->getValue();
OS << "OPC_EmitInteger, "
<< getEnumName(cast<EmitIntegerMatcher>(N)->getVT()) << ", ";
unsigned Bytes = 2+EmitVBRValue(Val, OS);
OS << '\n';
return Bytes;
}
case Matcher::EmitStringInteger: {
const std::string &Val = cast<EmitStringIntegerMatcher>(N)->getValue();
// These should always fit into one byte.
OS << "OPC_EmitInteger, "
<< getEnumName(cast<EmitStringIntegerMatcher>(N)->getVT()) << ", "
<< Val << ",\n";
return 3;
}
case Matcher::EmitRegister: {
const EmitRegisterMatcher *Matcher = cast<EmitRegisterMatcher>(N);
const CodeGenRegister *Reg = Matcher->getReg();
// If the enum value of the register is larger than one byte can handle,
// use EmitRegister2.
if (Reg && Reg->EnumValue > 255) {
OS << "OPC_EmitRegister2, " << getEnumName(Matcher->getVT()) << ", ";
OS << "TARGET_VAL(" << getQualifiedName(Reg->TheDef) << "),\n";
return 4;
} else {
OS << "OPC_EmitRegister, " << getEnumName(Matcher->getVT()) << ", ";
if (Reg) {
OS << getQualifiedName(Reg->TheDef) << ",\n";
} else {
OS << "0 ";
if (!OmitComments)
OS << "/*zero_reg*/";
OS << ",\n";
}
return 3;
}
}
case Matcher::EmitConvertToTarget:
OS << "OPC_EmitConvertToTarget, "
<< cast<EmitConvertToTargetMatcher>(N)->getSlot() << ",\n";
return 2;
case Matcher::EmitMergeInputChains: {
const EmitMergeInputChainsMatcher *MN =
cast<EmitMergeInputChainsMatcher>(N);
// Handle the specialized forms OPC_EmitMergeInputChains1_0 and 1_1.
if (MN->getNumNodes() == 1 && MN->getNode(0) < 2) {
OS << "OPC_EmitMergeInputChains1_" << MN->getNode(0) << ",\n";
return 1;
}
OS << "OPC_EmitMergeInputChains, " << MN->getNumNodes() << ", ";
for (unsigned i = 0, e = MN->getNumNodes(); i != e; ++i)
OS << MN->getNode(i) << ", ";
OS << '\n';
return 2+MN->getNumNodes();
}
case Matcher::EmitCopyToReg:
OS << "OPC_EmitCopyToReg, "
<< cast<EmitCopyToRegMatcher>(N)->getSrcSlot() << ", "
<< getQualifiedName(cast<EmitCopyToRegMatcher>(N)->getDestPhysReg())
<< ",\n";
return 3;
case Matcher::EmitNodeXForm: {
const EmitNodeXFormMatcher *XF = cast<EmitNodeXFormMatcher>(N);
OS << "OPC_EmitNodeXForm, " << getNodeXFormID(XF->getNodeXForm()) << ", "
<< XF->getSlot() << ',';
if (!OmitComments)
OS.PadToColumn(CommentIndent) << "// "<<XF->getNodeXForm()->getName();
OS <<'\n';
return 3;
}
case Matcher::EmitNode:
case Matcher::MorphNodeTo: {
const EmitNodeMatcherCommon *EN = cast<EmitNodeMatcherCommon>(N);
OS << (isa<EmitNodeMatcher>(EN) ? "OPC_EmitNode" : "OPC_MorphNodeTo");
OS << ", TARGET_VAL(" << EN->getOpcodeName() << "), 0";
if (EN->hasChain()) OS << "|OPFL_Chain";
if (EN->hasInFlag()) OS << "|OPFL_GlueInput";
if (EN->hasOutFlag()) OS << "|OPFL_GlueOutput";
if (EN->hasMemRefs()) OS << "|OPFL_MemRefs";
if (EN->getNumFixedArityOperands() != -1)
OS << "|OPFL_Variadic" << EN->getNumFixedArityOperands();
OS << ",\n";
OS.PadToColumn(Indent*2+4) << EN->getNumVTs();
if (!OmitComments)
OS << "/*#VTs*/";
OS << ", ";
for (unsigned i = 0, e = EN->getNumVTs(); i != e; ++i)
OS << getEnumName(EN->getVT(i)) << ", ";
OS << EN->getNumOperands();
if (!OmitComments)
OS << "/*#Ops*/";
OS << ", ";
unsigned NumOperandBytes = 0;
for (unsigned i = 0, e = EN->getNumOperands(); i != e; ++i)
NumOperandBytes += EmitVBRValue(EN->getOperand(i), OS);
if (!OmitComments) {
// Print the result #'s for EmitNode.
if (const EmitNodeMatcher *E = dyn_cast<EmitNodeMatcher>(EN)) {
if (unsigned NumResults = EN->getNumVTs()) {
OS.PadToColumn(CommentIndent) << "// Results =";
unsigned First = E->getFirstResultSlot();
for (unsigned i = 0; i != NumResults; ++i)
OS << " #" << First+i;
}
}
OS << '\n';
if (const MorphNodeToMatcher *SNT = dyn_cast<MorphNodeToMatcher>(N)) {
OS.PadToColumn(Indent*2) << "// Src: "
<< *SNT->getPattern().getSrcPattern() << " - Complexity = "
<< SNT->getPattern().getPatternComplexity(CGP) << '\n';
OS.PadToColumn(Indent*2) << "// Dst: "
<< *SNT->getPattern().getDstPattern() << '\n';
}
} else
OS << '\n';
return 6+EN->getNumVTs()+NumOperandBytes;
}
case Matcher::MarkGlueResults: {
const MarkGlueResultsMatcher *CFR = cast<MarkGlueResultsMatcher>(N);
OS << "OPC_MarkGlueResults, " << CFR->getNumNodes() << ", ";
unsigned NumOperandBytes = 0;
for (unsigned i = 0, e = CFR->getNumNodes(); i != e; ++i)
NumOperandBytes += EmitVBRValue(CFR->getNode(i), OS);
OS << '\n';
return 2+NumOperandBytes;
}
case Matcher::CompleteMatch: {
const CompleteMatchMatcher *CM = cast<CompleteMatchMatcher>(N);
OS << "OPC_CompleteMatch, " << CM->getNumResults() << ", ";
unsigned NumResultBytes = 0;
for (unsigned i = 0, e = CM->getNumResults(); i != e; ++i)
NumResultBytes += EmitVBRValue(CM->getResult(i), OS);
OS << '\n';
if (!OmitComments) {
OS.PadToColumn(Indent*2) << "// Src: "
<< *CM->getPattern().getSrcPattern() << " - Complexity = "
<< CM->getPattern().getPatternComplexity(CGP) << '\n';
OS.PadToColumn(Indent*2) << "// Dst: "
<< *CM->getPattern().getDstPattern();
}
OS << '\n';
return 2 + NumResultBytes;
}
}
llvm_unreachable("Unreachable");
}
/// EmitMatcherList - Emit the bytes for the specified matcher subtree.
unsigned MatcherTableEmitter::
EmitMatcherList(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
formatted_raw_ostream &OS) {
unsigned Size = 0;
while (N) {
if (!OmitComments)
OS << "/*" << CurrentIdx << "*/";
unsigned MatcherSize = EmitMatcher(N, Indent, CurrentIdx, OS);
Size += MatcherSize;
CurrentIdx += MatcherSize;
// If there are other nodes in this list, iterate to them, otherwise we're
// done.
N = N->getNext();
}
return Size;
}
void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit pattern predicates.
if (!PatternPredicates.empty()) {
OS << "bool CheckPatternPredicate(unsigned PredNo) const override {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
OS << " case " << i << ": return " << PatternPredicates[i] << ";\n";
OS << " }\n";
OS << "}\n\n";
}
// Emit Node predicates.
// FIXME: Annoyingly, these are stored by name, which we never even emit. Yay?
StringMap<TreePattern*> PFsByName;
for (CodeGenDAGPatterns::pf_iterator I = CGP.pf_begin(), E = CGP.pf_end();
I != E; ++I)
PFsByName[I->first->getName()] = I->second.get();
if (!NodePredicates.empty()) {
OS << "bool CheckNodePredicate(SDNode *Node,\n";
OS << " unsigned PredNo) const override {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) {
// Emit the predicate code corresponding to this pattern.
TreePredicateFn PredFn = NodePredicates[i];
assert(!PredFn.isAlwaysTrue() && "No code in this predicate");
OS << " case " << i << ": { // " << NodePredicates[i].getFnName() <<'\n';
OS << PredFn.getCodeToRunOnSDNode() << "\n }\n";
}
OS << " }\n";
OS << "}\n\n";
}
// Emit CompletePattern matchers.
// FIXME: This should be const.
if (!ComplexPatterns.empty()) {
OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n";
OS << " SDValue N, unsigned PatternNo,\n";
OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) override {\n";
OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: llvm_unreachable(\"Invalid pattern # in table?\");\n";
for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
const ComplexPattern &P = *ComplexPatterns[i];
unsigned NumOps = P.getNumOperands();
if (P.hasProperty(SDNPHasChain))
++NumOps; // Get the chained node too.
OS << " case " << i << ":\n";
OS << " Result.resize(NextRes+" << NumOps << ");\n";
OS << " return " << P.getSelectFunc();
OS << "(";
// If the complex pattern wants the root of the match, pass it in as the
// first argument.
if (P.hasProperty(SDNPWantRoot))
OS << "Root, ";
// If the complex pattern wants the parent of the operand being matched,
// pass it in as the next argument.
if (P.hasProperty(SDNPWantParent))
OS << "Parent, ";
OS << "N";
for (unsigned i = 0; i != NumOps; ++i)
OS << ", Result[NextRes+" << i << "].first";
OS << ");\n";
}
OS << " }\n";
OS << "}\n\n";
}
// Emit SDNodeXForm handlers.
// FIXME: This should be const.
if (!NodeXForms.empty()) {
OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) override {\n";
OS << " switch (XFormNo) {\n";
OS << " default: llvm_unreachable(\"Invalid xform # in table?\");\n";
// FIXME: The node xform could take SDValue's instead of SDNode*'s.
for (unsigned i = 0, e = NodeXForms.size(); i != e; ++i) {
const CodeGenDAGPatterns::NodeXForm &Entry =
CGP.getSDNodeTransform(NodeXForms[i]);
Record *SDNode = Entry.first;
const std::string &Code = Entry.second;
OS << " case " << i << ": { ";
if (!OmitComments)
OS << "// " << NodeXForms[i]->getName();
OS << '\n';
std::string ClassName = CGP.getSDNodeInfo(SDNode).getSDClassName();
if (ClassName == "SDNode")
OS << " SDNode *N = V.getNode();\n";
else
OS << " " << ClassName << " *N = cast<" << ClassName
<< ">(V.getNode());\n";
OS << Code << "\n }\n";
}
OS << " }\n";
OS << "}\n\n";
}
}
static void BuildHistogram(const Matcher *M, std::vector<unsigned> &OpcodeFreq){
for (; M != nullptr; M = M->getNext()) {
// Count this node.
if (unsigned(M->getKind()) >= OpcodeFreq.size())
OpcodeFreq.resize(M->getKind()+1);
OpcodeFreq[M->getKind()]++;
// Handle recursive nodes.
if (const ScopeMatcher *SM = dyn_cast<ScopeMatcher>(M)) {
for (unsigned i = 0, e = SM->getNumChildren(); i != e; ++i)
BuildHistogram(SM->getChild(i), OpcodeFreq);
} else if (const SwitchOpcodeMatcher *SOM =
dyn_cast<SwitchOpcodeMatcher>(M)) {
for (unsigned i = 0, e = SOM->getNumCases(); i != e; ++i)
BuildHistogram(SOM->getCaseMatcher(i), OpcodeFreq);
} else if (const SwitchTypeMatcher *STM = dyn_cast<SwitchTypeMatcher>(M)) {
for (unsigned i = 0, e = STM->getNumCases(); i != e; ++i)
BuildHistogram(STM->getCaseMatcher(i), OpcodeFreq);
}
}
}
void MatcherTableEmitter::EmitHistogram(const Matcher *M,
formatted_raw_ostream &OS) {
if (OmitComments)
return;
std::vector<unsigned> OpcodeFreq;
BuildHistogram(M, OpcodeFreq);
OS << " // Opcode Histogram:\n";
for (unsigned i = 0, e = OpcodeFreq.size(); i != e; ++i) {
OS << " // #";
switch ((Matcher::KindTy)i) {
case Matcher::Scope: OS << "OPC_Scope"; break;
case Matcher::RecordNode: OS << "OPC_RecordNode"; break;
case Matcher::RecordChild: OS << "OPC_RecordChild"; break;
case Matcher::RecordMemRef: OS << "OPC_RecordMemRef"; break;
case Matcher::CaptureGlueInput: OS << "OPC_CaptureGlueInput"; break;
case Matcher::MoveChild: OS << "OPC_MoveChild"; break;
case Matcher::MoveParent: OS << "OPC_MoveParent"; break;
case Matcher::CheckSame: OS << "OPC_CheckSame"; break;
case Matcher::CheckChildSame: OS << "OPC_CheckChildSame"; break;
case Matcher::CheckPatternPredicate:
OS << "OPC_CheckPatternPredicate"; break;
case Matcher::CheckPredicate: OS << "OPC_CheckPredicate"; break;
case Matcher::CheckOpcode: OS << "OPC_CheckOpcode"; break;
case Matcher::SwitchOpcode: OS << "OPC_SwitchOpcode"; break;
case Matcher::CheckType: OS << "OPC_CheckType"; break;
case Matcher::SwitchType: OS << "OPC_SwitchType"; break;
case Matcher::CheckChildType: OS << "OPC_CheckChildType"; break;
case Matcher::CheckInteger: OS << "OPC_CheckInteger"; break;
case Matcher::CheckChildInteger: OS << "OPC_CheckChildInteger"; break;
case Matcher::CheckCondCode: OS << "OPC_CheckCondCode"; break;
case Matcher::CheckValueType: OS << "OPC_CheckValueType"; break;
case Matcher::CheckComplexPat: OS << "OPC_CheckComplexPat"; break;
case Matcher::CheckAndImm: OS << "OPC_CheckAndImm"; break;
case Matcher::CheckOrImm: OS << "OPC_CheckOrImm"; break;
case Matcher::CheckFoldableChainNode:
OS << "OPC_CheckFoldableChainNode"; break;
case Matcher::EmitInteger: OS << "OPC_EmitInteger"; break;
case Matcher::EmitStringInteger: OS << "OPC_EmitStringInteger"; break;
case Matcher::EmitRegister: OS << "OPC_EmitRegister"; break;
case Matcher::EmitConvertToTarget: OS << "OPC_EmitConvertToTarget"; break;
case Matcher::EmitMergeInputChains: OS << "OPC_EmitMergeInputChains"; break;
case Matcher::EmitCopyToReg: OS << "OPC_EmitCopyToReg"; break;
case Matcher::EmitNode: OS << "OPC_EmitNode"; break;
case Matcher::MorphNodeTo: OS << "OPC_MorphNodeTo"; break;
case Matcher::EmitNodeXForm: OS << "OPC_EmitNodeXForm"; break;
case Matcher::MarkGlueResults: OS << "OPC_MarkGlueResults"; break;
case Matcher::CompleteMatch: OS << "OPC_CompleteMatch"; break;
}
OS.PadToColumn(40) << " = " << OpcodeFreq[i] << '\n';
}
OS << '\n';
}
void llvm::EmitMatcherTable(const Matcher *TheMatcher,
const CodeGenDAGPatterns &CGP,
raw_ostream &O) {
formatted_raw_ostream OS(O);
OS << "// The main instruction selector code.\n";
OS << "SDNode *SelectCode(SDNode *N) {\n";
MatcherTableEmitter MatcherEmitter(CGP);
OS << " // Some target values are emitted as 2 bytes, TARGET_VAL handles\n";
OS << " // this.\n";
OS << " #define TARGET_VAL(X) X & 255, unsigned(X) >> 8\n";
OS << " static const unsigned char MatcherTable[] = {\n";
unsigned TotalSize = MatcherEmitter.EmitMatcherList(TheMatcher, 6, 0, OS);
OS << " 0\n }; // Total Array size is " << (TotalSize+1) << " bytes\n\n";
MatcherEmitter.EmitHistogram(TheMatcher, OS);
OS << " #undef TARGET_VAL\n";
OS << " return SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));\n}\n";
OS << '\n';
// Next up, emit the function for node and pattern predicates:
MatcherEmitter.EmitPredicateFunctions(OS);
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/FastISelEmitter.cpp | //===- FastISelEmitter.cpp - Generate an instruction selector -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits code for use by the "fast" instruction
// selection algorithm. See the comments at the top of
// lib/CodeGen/SelectionDAG/FastISel.cpp for background.
//
// This file scans through the target's tablegen instruction-info files
// and extracts instructions with obvious-looking patterns, and it emits
// code to look up these instructions by type and operator.
//
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
/// InstructionMemo - This class holds additional information about an
/// instruction needed to emit code for it.
///
namespace {
struct InstructionMemo {
std::string Name;
const CodeGenRegisterClass *RC;
std::string SubRegNo;
std::vector<std::string>* PhysRegs;
std::string PredicateCheck;
};
} // End anonymous namespace
/// ImmPredicateSet - This uniques predicates (represented as a string) and
/// gives them unique (small) integer ID's that start at 0.
namespace {
class ImmPredicateSet {
DenseMap<TreePattern *, unsigned> ImmIDs;
std::vector<TreePredicateFn> PredsByName;
public:
unsigned getIDFor(TreePredicateFn Pred) {
unsigned &Entry = ImmIDs[Pred.getOrigPatFragRecord()];
if (Entry == 0) {
PredsByName.push_back(Pred);
Entry = PredsByName.size();
}
return Entry-1;
}
const TreePredicateFn &getPredicate(unsigned i) {
assert(i < PredsByName.size());
return PredsByName[i];
}
typedef std::vector<TreePredicateFn>::const_iterator iterator;
iterator begin() const { return PredsByName.begin(); }
iterator end() const { return PredsByName.end(); }
};
} // End anonymous namespace
/// OperandsSignature - This class holds a description of a list of operand
/// types. It has utility methods for emitting text based on the operands.
///
namespace {
struct OperandsSignature {
class OpKind {
enum { OK_Reg, OK_FP, OK_Imm, OK_Invalid = -1 };
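// Repr packs the kind into a single char: OK_Reg and OK_FP are stored
// directly, and an immediate is stored as OK_Imm plus its predicate ID,
// so any value >= OK_Imm is an immediate and getImmCode() recovers the ID.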
char Repr;
public:
OpKind() : Repr(OK_Invalid) {}
bool operator<(OpKind RHS) const { return Repr < RHS.Repr; }
bool operator==(OpKind RHS) const { return Repr == RHS.Repr; }
static OpKind getReg() { OpKind K; K.Repr = OK_Reg; return K; }
static OpKind getFP() { OpKind K; K.Repr = OK_FP; return K; }
static OpKind getImm(unsigned V) {
assert((unsigned)OK_Imm+V < 128 &&
"Too many integer predicates for the 'Repr' char");
OpKind K; K.Repr = OK_Imm+V; return K;
}
bool isReg() const { return Repr == OK_Reg; }
bool isFP() const { return Repr == OK_FP; }
bool isImm() const { return Repr >= OK_Imm; }
unsigned getImmCode() const { assert(isImm()); return Repr-OK_Imm; }
void printManglingSuffix(raw_ostream &OS, ImmPredicateSet &ImmPredicates,
bool StripImmCodes) const {
if (isReg())
OS << 'r';
else if (isFP())
OS << 'f';
else {
OS << 'i';
if (!StripImmCodes)
if (unsigned Code = getImmCode())
OS << "_" << ImmPredicates.getPredicate(Code-1).getFnName();
}
}
};
SmallVector<OpKind, 3> Operands;
bool operator<(const OperandsSignature &O) const {
return Operands < O.Operands;
}
bool operator==(const OperandsSignature &O) const {
return Operands == O.Operands;
}
bool empty() const { return Operands.empty(); }
bool hasAnyImmediateCodes() const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
if (Operands[i].isImm() && Operands[i].getImmCode() != 0)
return true;
return false;
}
/// getWithoutImmCodes - Return a copy of this with any immediate codes forced
/// to zero.
OperandsSignature getWithoutImmCodes() const {
OperandsSignature Result;
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
if (!Operands[i].isImm())
Result.Operands.push_back(Operands[i]);
else
Result.Operands.push_back(OpKind::getImm(0));
return Result;
}
void emitImmediatePredicate(raw_ostream &OS, ImmPredicateSet &ImmPredicates) {
bool EmittedAnything = false;
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (!Operands[i].isImm()) continue;
unsigned Code = Operands[i].getImmCode();
if (Code == 0) continue;
if (EmittedAnything)
OS << " &&\n ";
TreePredicateFn PredFn = ImmPredicates.getPredicate(Code-1);
// Emit the type check.
OS << "VT == "
<< getEnumName(PredFn.getOrigPatFragRecord()->getTree(0)->getType(0))
<< " && ";
OS << PredFn.getFnName() << "(imm" << i <<')';
EmittedAnything = true;
}
}
/// initialize - Examine the given pattern and initialize the contents
/// of the Operands array accordingly. Return true if all the operands
/// are supported, false otherwise.
///
bool initialize(TreePatternNode *InstPatNode, const CodeGenTarget &Target,
MVT::SimpleValueType VT,
ImmPredicateSet &ImmediatePredicates,
const CodeGenRegisterClass *OrigDstRC) {
if (InstPatNode->isLeaf())
return false;
if (InstPatNode->getOperator()->getName() == "imm") {
Operands.push_back(OpKind::getImm(0));
return true;
}
if (InstPatNode->getOperator()->getName() == "fpimm") {
Operands.push_back(OpKind::getFP());
return true;
}
const CodeGenRegisterClass *DstRC = nullptr;
for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
TreePatternNode *Op = InstPatNode->getChild(i);
// Handle imm operands specially.
if (!Op->isLeaf() && Op->getOperator()->getName() == "imm") {
unsigned PredNo = 0;
if (!Op->getPredicateFns().empty()) {
TreePredicateFn PredFn = Op->getPredicateFns()[0];
// If there is more than one predicate weighing in on this operand
// then we don't handle it. This doesn't typically happen for
// immediates anyway.
if (Op->getPredicateFns().size() > 1 ||
!PredFn.isImmediatePattern())
return false;
// Ignore any instruction with 'FastIselShouldIgnore', these are
// not needed and just bloat the fast instruction selector. For
// example, X86 doesn't need to generate code to match ADD16ri8 since
// ADD16ri will do just fine.
Record *Rec = PredFn.getOrigPatFragRecord()->getRecord();
if (Rec->getValueAsBit("FastIselShouldIgnore"))
return false;
PredNo = ImmediatePredicates.getIDFor(PredFn)+1;
}
// Handle unmatched immediate sizes here.
//if (Op->getType(0) != VT)
// return false;
Operands.push_back(OpKind::getImm(PredNo));
continue;
}
// For now, filter out any operand with a predicate.
// For now, filter out any operand with multiple values.
if (!Op->getPredicateFns().empty() || Op->getNumTypes() != 1)
return false;
if (!Op->isLeaf()) {
if (Op->getOperator()->getName() == "fpimm") {
Operands.push_back(OpKind::getFP());
continue;
}
// For now, ignore other non-leaf nodes.
return false;
}
assert(Op->hasTypeSet(0) && "Type inference not done?");
// For now, all the operands must have the same type (if they aren't
// immediates). Note that this causes us to reject variable sized shifts
// on X86.
if (Op->getType(0) != VT)
return false;
DefInit *OpDI = dyn_cast<DefInit>(Op->getLeafValue());
if (!OpDI)
return false;
Record *OpLeafRec = OpDI->getDef();
// For now, the only other thing we accept is register operands.
const CodeGenRegisterClass *RC = nullptr;
if (OpLeafRec->isSubClassOf("RegisterOperand"))
OpLeafRec = OpLeafRec->getValueAsDef("RegClass");
if (OpLeafRec->isSubClassOf("RegisterClass"))
RC = &Target.getRegisterClass(OpLeafRec);
else if (OpLeafRec->isSubClassOf("Register"))
RC = Target.getRegBank().getRegClassForRegister(OpLeafRec);
else if (OpLeafRec->isSubClassOf("ValueType")) {
RC = OrigDstRC;
} else
return false;
// For now, this needs to be a register class of some sort.
if (!RC)
return false;
// For now, all the operands must have the same register class or be
// a strict subclass of the destination.
if (DstRC) {
if (DstRC != RC && !DstRC->hasSubClass(RC))
return false;
} else
DstRC = RC;
Operands.push_back(OpKind::getReg());
}
return true;
}
void PrintParameters(raw_ostream &OS) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (Operands[i].isReg()) {
OS << "unsigned Op" << i << ", bool Op" << i << "IsKill";
} else if (Operands[i].isImm()) {
OS << "uint64_t imm" << i;
} else if (Operands[i].isFP()) {
OS << "const ConstantFP *f" << i;
} else {
llvm_unreachable("Unknown operand kind!");
}
if (i + 1 != e)
OS << ", ";
}
}
void PrintArguments(raw_ostream &OS,
const std::vector<std::string> &PR) const {
assert(PR.size() == Operands.size());
bool PrintedArg = false;
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (PR[i] != "")
// Implicit physical register operand.
continue;
if (PrintedArg)
OS << ", ";
if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill";
PrintedArg = true;
} else if (Operands[i].isImm()) {
OS << "imm" << i;
PrintedArg = true;
} else if (Operands[i].isFP()) {
OS << "f" << i;
PrintedArg = true;
} else {
llvm_unreachable("Unknown operand kind!");
}
}
}
void PrintArguments(raw_ostream &OS) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill";
} else if (Operands[i].isImm()) {
OS << "imm" << i;
} else if (Operands[i].isFP()) {
OS << "f" << i;
} else {
llvm_unreachable("Unknown operand kind!");
}
if (i + 1 != e)
OS << ", ";
}
}
void PrintManglingSuffix(raw_ostream &OS, const std::vector<std::string> &PR,
ImmPredicateSet &ImmPredicates,
bool StripImmCodes = false) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (PR[i] != "")
// Implicit physical register operand. e.g. Instruction::Mul expects to
// select to a binary op. On x86, mul may take a single operand with
// the other operand being implicit. We must emit something that looks
// like a binary instruction except for the very inner fastEmitInst_*
// call.
continue;
Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
}
}
void PrintManglingSuffix(raw_ostream &OS, ImmPredicateSet &ImmPredicates,
bool StripImmCodes = false) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
}
};
} // End anonymous namespace
namespace {
class FastISelMap {
// A multimap is needed instead of a "plain" map because the key is
// the instruction's complexity (an int) and they are not unique.
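// The nested maps below are keyed, from outermost to innermost, by operand
// signature, opcode name, operand VT, and return VT; the innermost multimap
// is keyed by pattern complexity and holds the InstructionMemo to emit.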
typedef std::multimap<int, InstructionMemo> PredMap;
typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
typedef std::map<MVT::SimpleValueType, RetPredMap> TypeRetPredMap;
typedef std::map<std::string, TypeRetPredMap> OpcodeTypeRetPredMap;
typedef std::map<OperandsSignature, OpcodeTypeRetPredMap>
OperandsOpcodeTypeRetPredMap;
OperandsOpcodeTypeRetPredMap SimplePatterns;
// This is used to check that there are no duplicate predicates
typedef std::multimap<std::string, bool> PredCheckMap;
typedef std::map<MVT::SimpleValueType, PredCheckMap> RetPredCheckMap;
typedef std::map<MVT::SimpleValueType, RetPredCheckMap> TypeRetPredCheckMap;
typedef std::map<std::string, TypeRetPredCheckMap> OpcodeTypeRetPredCheckMap;
typedef std::map<OperandsSignature, OpcodeTypeRetPredCheckMap>
OperandsOpcodeTypeRetPredCheckMap;
OperandsOpcodeTypeRetPredCheckMap SimplePatternsCheck;
std::map<OperandsSignature, std::vector<OperandsSignature> >
SignaturesWithConstantForms;
std::string InstNS;
ImmPredicateSet ImmediatePredicates;
public:
explicit FastISelMap(std::string InstNS);
void collectPatterns(CodeGenDAGPatterns &CGP);
void printImmediatePredicates(raw_ostream &OS);
void printFunctionDefinitions(raw_ostream &OS);
private:
void emitInstructionCode(raw_ostream &OS,
const OperandsSignature &Operands,
const PredMap &PM,
const std::string &RetVTName);
};
} // End anonymous namespace
static std::string getOpcodeName(Record *Op, CodeGenDAGPatterns &CGP) {
return CGP.getSDNodeInfo(Op).getEnumName();
}
static std::string getLegalCName(std::string OpName) {
std::string::size_type pos = OpName.find("::");
if (pos != std::string::npos)
OpName.replace(pos, 2, "_");
return OpName;
}
FastISelMap::FastISelMap(std::string instns)
: InstNS(instns) {
}
static std::string PhyRegForNode(TreePatternNode *Op,
const CodeGenTarget &Target) {
std::string PhysReg;
if (!Op->isLeaf())
return PhysReg;
Record *OpLeafRec = cast<DefInit>(Op->getLeafValue())->getDef();
if (!OpLeafRec->isSubClassOf("Register"))
return PhysReg;
PhysReg += cast<StringInit>(OpLeafRec->getValue("Namespace")->getValue())
->getValue();
PhysReg += "::";
PhysReg += Target.getRegBank().getReg(OpLeafRec)->getName();
return PhysReg;
}
void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
const CodeGenTarget &Target = CGP.getTargetInfo();
// Determine the target's namespace name.
InstNS = Target.getInstNamespace() + "::";
assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
// Scan through all the patterns and record the simple ones.
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
E = CGP.ptm_end(); I != E; ++I) {
const PatternToMatch &Pattern = *I;
// For now, just look at Instructions, so that we don't have to worry
// about emitting multiple instructions for a pattern.
TreePatternNode *Dst = Pattern.getDstPattern();
if (Dst->isLeaf()) continue;
Record *Op = Dst->getOperator();
if (!Op->isSubClassOf("Instruction"))
continue;
CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
if (II.Operands.empty())
continue;
// For now, ignore multi-instruction patterns.
bool MultiInsts = false;
for (unsigned i = 0, e = Dst->getNumChildren(); i != e; ++i) {
TreePatternNode *ChildOp = Dst->getChild(i);
if (ChildOp->isLeaf())
continue;
if (ChildOp->getOperator()->isSubClassOf("Instruction")) {
MultiInsts = true;
break;
}
}
if (MultiInsts)
continue;
// For now, ignore instructions where the first operand is not an
// output register.
const CodeGenRegisterClass *DstRC = nullptr;
std::string SubRegNo;
if (Op->getName() != "EXTRACT_SUBREG") {
Record *Op0Rec = II.Operands[0].Rec;
if (Op0Rec->isSubClassOf("RegisterOperand"))
Op0Rec = Op0Rec->getValueAsDef("RegClass");
if (!Op0Rec->isSubClassOf("RegisterClass"))
continue;
DstRC = &Target.getRegisterClass(Op0Rec);
if (!DstRC)
continue;
} else {
// If this isn't a leaf, then continue since the register classes are
// a bit too complicated for now.
if (!Dst->getChild(1)->isLeaf()) continue;
DefInit *SR = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
if (SR)
SubRegNo = getQualifiedName(SR->getDef());
else
SubRegNo = Dst->getChild(1)->getLeafValue()->getAsString();
}
// Inspect the pattern.
TreePatternNode *InstPatNode = Pattern.getSrcPattern();
if (!InstPatNode) continue;
if (InstPatNode->isLeaf()) continue;
// Ignore multiple result nodes for now.
if (InstPatNode->getNumTypes() > 1) continue;
Record *InstPatOp = InstPatNode->getOperator();
std::string OpcodeName = getOpcodeName(InstPatOp, CGP);
MVT::SimpleValueType RetVT = MVT::isVoid;
if (InstPatNode->getNumTypes()) RetVT = InstPatNode->getType(0);
MVT::SimpleValueType VT = RetVT;
if (InstPatNode->getNumChildren()) {
assert(InstPatNode->getChild(0)->getNumTypes() == 1);
VT = InstPatNode->getChild(0)->getType(0);
}
// For now, filter out any instructions with predicates.
if (!InstPatNode->getPredicateFns().empty())
continue;
// Check all the operands.
OperandsSignature Operands;
if (!Operands.initialize(InstPatNode, Target, VT, ImmediatePredicates,
DstRC))
continue;
std::vector<std::string>* PhysRegInputs = new std::vector<std::string>();
if (InstPatNode->getOperator()->getName() == "imm" ||
InstPatNode->getOperator()->getName() == "fpimm")
PhysRegInputs->push_back("");
else {
// Compute the PhysRegs used by the given pattern, and check that
// the mapping from the src to dst patterns is simple.
bool FoundNonSimplePattern = false;
unsigned DstIndex = 0;
for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
std::string PhysReg = PhyRegForNode(InstPatNode->getChild(i), Target);
if (PhysReg.empty()) {
if (DstIndex >= Dst->getNumChildren() ||
Dst->getChild(DstIndex)->getName() !=
InstPatNode->getChild(i)->getName()) {
FoundNonSimplePattern = true;
break;
}
++DstIndex;
}
PhysRegInputs->push_back(PhysReg);
}
if (Op->getName() != "EXTRACT_SUBREG" && DstIndex < Dst->getNumChildren())
FoundNonSimplePattern = true;
if (FoundNonSimplePattern)
continue;
}
// Check if the operands match one of the patterns handled by FastISel.
std::string ManglingSuffix;
raw_string_ostream SuffixOS(ManglingSuffix);
Operands.PrintManglingSuffix(SuffixOS, ImmediatePredicates, true);
SuffixOS.flush();
if (!StringSwitch<bool>(ManglingSuffix)
.Cases("", "r", "rr", "ri", "rf", true)
.Cases("rri", "i", "f", true)
.Default(false))
continue;
// Get the predicate that guards this pattern.
std::string PredicateCheck = Pattern.getPredicateCheck();
// Ok, we found a pattern that we can handle. Remember it.
InstructionMemo Memo = {
Pattern.getDstPattern()->getOperator()->getName(),
DstRC,
SubRegNo,
PhysRegInputs,
PredicateCheck
};
int complexity = Pattern.getPatternComplexity(CGP);
if (SimplePatternsCheck[Operands][OpcodeName][VT]
[RetVT].count(PredicateCheck)) {
PrintFatalError(Pattern.getSrcRecord()->getLoc(),
"Duplicate predicate in FastISel table!");
}
SimplePatternsCheck[Operands][OpcodeName][VT][RetVT].insert(
std::make_pair(PredicateCheck, true));
// Note: Instructions with the same complexity will appear in the order
// that they are encountered.
SimplePatterns[Operands][OpcodeName][VT][RetVT].insert(
std::make_pair(complexity, Memo));
// If any of the operands were immediates with predicates on them, strip
// them down to a signature that doesn't have predicates so that we can
// associate them with the stripped predicate version.
if (Operands.hasAnyImmediateCodes()) {
SignaturesWithConstantForms[Operands.getWithoutImmCodes()]
.push_back(Operands);
}
}
}
void FastISelMap::printImmediatePredicates(raw_ostream &OS) {
if (ImmediatePredicates.begin() == ImmediatePredicates.end())
return;
OS << "\n// FastEmit Immediate Predicate functions.\n";
for (ImmPredicateSet::iterator I = ImmediatePredicates.begin(),
E = ImmediatePredicates.end(); I != E; ++I) {
OS << "static bool " << I->getFnName() << "(int64_t Imm) {\n";
OS << I->getImmediatePredicateCode() << "\n}\n";
}
OS << "\n\n";
}
void FastISelMap::emitInstructionCode(raw_ostream &OS,
const OperandsSignature &Operands,
const PredMap &PM,
const std::string &RetVTName) {
// Emit code for each possible instruction. There may be
// multiple if there are subtarget concerns. A reverse iterator
// is used to produce the ones with highest complexity first.
bool OneHadNoPredicate = false;
for (PredMap::const_reverse_iterator PI = PM.rbegin(), PE = PM.rend();
PI != PE; ++PI) {
const InstructionMemo &Memo = PI->second;
std::string PredicateCheck = Memo.PredicateCheck;
if (PredicateCheck.empty()) {
assert(!OneHadNoPredicate &&
"Multiple instructions match and more than one had "
"no predicate!");
OneHadNoPredicate = true;
} else {
if (OneHadNoPredicate) {
// FIXME: This should be a PrintError once the x86 target
// fixes PR21575.
PrintWarning("Multiple instructions match and one with no "
"predicate came before one with a predicate! "
"name:" + Memo.Name + " predicate: " +
PredicateCheck);
}
OS << " if (" + PredicateCheck + ") {\n";
OS << " ";
}
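// Copy any fixed physical-register inputs into place with COPY instructions
// before emitting the instruction itself.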
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, "
<< "TII.get(TargetOpcode::COPY), "
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
OS << " return fastEmitInst_";
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
OS << "(" << InstNS << Memo.Name << ", ";
OS << "&" << InstNS << Memo.RC->getName() << "RegClass";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS, *Memo.PhysRegs);
OS << ");\n";
} else {
OS << "extractsubreg(" << RetVTName
<< ", Op0, Op0IsKill, " << Memo.SubRegNo << ");\n";
}
if (!PredicateCheck.empty()) {
OS << " }\n";
}
}
// Return 0 if all of the possibilities had predicates but none
// were satisfied.
if (!OneHadNoPredicate)
OS << " return 0;\n";
OS << "}\n";
OS << "\n";
}
void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
// Now emit code for all the patterns that we collected.
for (OperandsOpcodeTypeRetPredMap::const_iterator OI = SimplePatterns.begin(),
OE = SimplePatterns.end(); OI != OE; ++OI) {
const OperandsSignature &Operands = OI->first;
const OpcodeTypeRetPredMap &OTM = OI->second;
for (OpcodeTypeRetPredMap::const_iterator I = OTM.begin(), E = OTM.end();
I != E; ++I) {
const std::string &Opcode = I->first;
const TypeRetPredMap &TM = I->second;
OS << "// FastEmit functions for " << Opcode << ".\n";
OS << "\n";
// Emit one function for each opcode,type pair.
for (TypeRetPredMap::const_iterator TI = TM.begin(), TE = TM.end();
TI != TE; ++TI) {
MVT::SimpleValueType VT = TI->first;
const RetPredMap &RM = TI->second;
if (RM.size() != 1) {
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
MVT::SimpleValueType RetVT = RI->first;
const PredMap &PM = RI->second;
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode)
<< "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(";
Operands.PrintParameters(OS);
OS << ") {\n";
emitInstructionCode(OS, Operands, PM, getName(RetVT));
}
// Emit one function for the type that demultiplexes on return type.
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\nswitch (RetVT.SimpleTy) {\n";
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
MVT::SimpleValueType RetVT = RI->first;
OS << " case " << getName(RetVT) << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(";
Operands.PrintArguments(OS);
OS << ");\n";
}
OS << " default: return 0;\n}\n}\n\n";
} else {
// Non-variadic return type.
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\n";
OS << " if (RetVT.SimpleTy != " << getName(RM.begin()->first)
<< ")\n return 0;\n";
const PredMap &PM = RM.begin()->second;
emitInstructionCode(OS, Operands, PM, "RetVT");
}
}
// Emit one function for the opcode that demultiplexes based on the type.
OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\n";
OS << " switch (VT.SimpleTy) {\n";
for (TypeRetPredMap::const_iterator TI = TM.begin(), TE = TM.end();
TI != TE; ++TI) {
MVT::SimpleValueType VT = TI->first;
std::string TypeName = getName(VT);
OS << " case " << TypeName << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS);
OS << ");\n";
}
OS << " default: return 0;\n";
OS << " }\n";
OS << "}\n";
OS << "\n";
}
OS << "// Top-level FastEmit function.\n";
OS << "\n";
// Emit one function for the operand signature that demultiplexes based
// on opcode and type.
OS << "unsigned fastEmit_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT, unsigned Opcode";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") ";
if (!Operands.hasAnyImmediateCodes())
OS << "override ";
OS << "{\n";
// If there are any forms of this signature available that operate on
// constrained forms of the immediate (e.g., 32-bit sext immediate in a
// 64-bit operand), check them first.
std::map<OperandsSignature, std::vector<OperandsSignature> >::iterator MI
= SignaturesWithConstantForms.find(Operands);
if (MI != SignaturesWithConstantForms.end()) {
// Unique any duplicates out of the list.
std::sort(MI->second.begin(), MI->second.end());
MI->second.erase(std::unique(MI->second.begin(), MI->second.end()),
MI->second.end());
// Check each in order it was seen. It would be nice to have a good
// relative ordering between them, but we're not going for optimality
// here.
for (unsigned i = 0, e = MI->second.size(); i != e; ++i) {
OS << " if (";
MI->second[i].emitImmediatePredicate(OS, ImmediatePredicates);
OS << ")\n if (unsigned Reg = fastEmit_";
MI->second[i].PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT, Opcode";
if (!MI->second[i].empty())
OS << ", ";
MI->second[i].PrintArguments(OS);
OS << "))\n return Reg;\n\n";
}
// Done with this, remove it.
SignaturesWithConstantForms.erase(MI);
}
OS << " switch (Opcode) {\n";
for (OpcodeTypeRetPredMap::const_iterator I = OTM.begin(), E = OTM.end();
I != E; ++I) {
const std::string &Opcode = I->first;
OS << " case " << Opcode << ": return fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS);
OS << ");\n";
}
OS << " default: return 0;\n";
OS << " }\n";
OS << "}\n";
OS << "\n";
}
// TODO: SignaturesWithConstantForms should be empty here.
}
namespace llvm {
void EmitFastISel(RecordKeeper &RK, raw_ostream &OS) {
CodeGenDAGPatterns CGP(RK);
const CodeGenTarget &Target = CGP.getTargetInfo();
emitSourceFileHeader("\"Fast\" Instruction Selector for the " +
Target.getName() + " target", OS);
// Determine the target's namespace name.
std::string InstNS = Target.getInstNamespace() + "::";
assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
FastISelMap F(InstNS);
F.collectPatterns(CGP);
F.printImmediatePredicates(OS);
F.printFunctionDefinitions(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86DisassemblerTables.cpp | //===- X86DisassemblerTables.cpp - Disassembler tables ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of the disassembler tables.
// Documentation for the disassembler emitter in general can be found in
// X86DisassemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#include "X86DisassemblerTables.h"
#include "X86DisassemblerShared.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <map>
using namespace llvm;
using namespace X86Disassembler;
/// stringForContext - Returns a string containing the name of a particular
/// InstructionContext, usually for diagnostic purposes.
///
/// @param insnContext - The instruction class to transform to a string.
/// @return - A statically-allocated string constant that contains the
/// name of the instruction class.
static inline const char* stringForContext(InstructionContext insnContext) {
switch (insnContext) {
default:
llvm_unreachable("Unhandled instruction class");
#define ENUM_ENTRY(n, r, d) case n: return #n; break;
#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) ENUM_ENTRY(n##_K_B, r, d)\
ENUM_ENTRY(n##_KZ, r, d) ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)\
ENUM_ENTRY(n##_KZ_B, r, d)
INSTRUCTION_CONTEXTS
#undef ENUM_ENTRY
#undef ENUM_ENTRY_K_B
}
}
/// stringForOperandType - Like stringForContext, but for OperandTypes.
static inline const char* stringForOperandType(OperandType type) {
switch (type) {
default:
llvm_unreachable("Unhandled type");
#define ENUM_ENTRY(i, d) case i: return #i;
TYPES
#undef ENUM_ENTRY
}
}
/// stringForOperandEncoding - like stringForContext, but for
/// OperandEncodings.
static inline const char* stringForOperandEncoding(OperandEncoding encoding) {
switch (encoding) {
default:
llvm_unreachable("Unhandled encoding");
#define ENUM_ENTRY(i, d) case i: return #i;
ENCODINGS
#undef ENUM_ENTRY
}
}
/// inheritsFrom - Indicates whether all instructions in one class also belong
/// to another class.
///
/// @param child - The class that may be the subset
/// @param parent - The class that may be the superset
/// @return - True if child is a subset of parent, false otherwise.
static inline bool inheritsFrom(InstructionContext child,
InstructionContext parent,
bool VEX_LIG = false, bool AdSize64 = false) {
if (child == parent)
return true;
switch (parent) {
case IC:
return(inheritsFrom(child, IC_64BIT, AdSize64) ||
inheritsFrom(child, IC_OPSIZE) ||
inheritsFrom(child, IC_ADSIZE) ||
inheritsFrom(child, IC_XD) ||
inheritsFrom(child, IC_XS));
case IC_64BIT:
return(inheritsFrom(child, IC_64BIT_REXW) ||
inheritsFrom(child, IC_64BIT_OPSIZE) ||
(!AdSize64 && inheritsFrom(child, IC_64BIT_ADSIZE)) ||
inheritsFrom(child, IC_64BIT_XD) ||
inheritsFrom(child, IC_64BIT_XS));
case IC_OPSIZE:
return inheritsFrom(child, IC_64BIT_OPSIZE) ||
inheritsFrom(child, IC_OPSIZE_ADSIZE);
case IC_ADSIZE:
return inheritsFrom(child, IC_OPSIZE_ADSIZE);
case IC_OPSIZE_ADSIZE:
return false;
case IC_64BIT_ADSIZE:
return inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE);
case IC_64BIT_OPSIZE_ADSIZE:
return false;
case IC_XD:
return inheritsFrom(child, IC_64BIT_XD);
case IC_XS:
return inheritsFrom(child, IC_64BIT_XS);
case IC_XD_OPSIZE:
return inheritsFrom(child, IC_64BIT_XD_OPSIZE);
case IC_XS_OPSIZE:
return inheritsFrom(child, IC_64BIT_XS_OPSIZE);
case IC_64BIT_REXW:
return(inheritsFrom(child, IC_64BIT_REXW_XS) ||
inheritsFrom(child, IC_64BIT_REXW_XD) ||
inheritsFrom(child, IC_64BIT_REXW_OPSIZE) ||
(!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE)));
case IC_64BIT_OPSIZE:
return inheritsFrom(child, IC_64BIT_REXW_OPSIZE) ||
(!AdSize64 && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE)) ||
(!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE));
case IC_64BIT_XD:
return(inheritsFrom(child, IC_64BIT_REXW_XD));
case IC_64BIT_XS:
return(inheritsFrom(child, IC_64BIT_REXW_XS));
case IC_64BIT_XD_OPSIZE:
case IC_64BIT_XS_OPSIZE:
return false;
case IC_64BIT_REXW_XD:
case IC_64BIT_REXW_XS:
case IC_64BIT_REXW_OPSIZE:
case IC_64BIT_REXW_ADSIZE:
return false;
case IC_VEX:
return (VEX_LIG && inheritsFrom(child, IC_VEX_L_W)) ||
inheritsFrom(child, IC_VEX_W) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L));
case IC_VEX_XS:
return (VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XS)) ||
inheritsFrom(child, IC_VEX_W_XS) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XS));
case IC_VEX_XD:
return (VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XD)) ||
inheritsFrom(child, IC_VEX_W_XD) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XD));
case IC_VEX_OPSIZE:
return (VEX_LIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE)) ||
inheritsFrom(child, IC_VEX_W_OPSIZE) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE));
case IC_VEX_W:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W);
case IC_VEX_W_XS:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XS);
case IC_VEX_W_XD:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XD);
case IC_VEX_W_OPSIZE:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
case IC_VEX_L:
return inheritsFrom(child, IC_VEX_L_W);
case IC_VEX_L_XS:
return inheritsFrom(child, IC_VEX_L_W_XS);
case IC_VEX_L_XD:
return inheritsFrom(child, IC_VEX_L_W_XD);
case IC_VEX_L_OPSIZE:
return inheritsFrom(child, IC_VEX_L_W_OPSIZE);
case IC_VEX_L_W:
case IC_VEX_L_W_XS:
case IC_VEX_L_W_XD:
case IC_VEX_L_W_OPSIZE:
return false;
case IC_EVEX:
return inheritsFrom(child, IC_EVEX_W) ||
inheritsFrom(child, IC_EVEX_L_W);
case IC_EVEX_XS:
return inheritsFrom(child, IC_EVEX_W_XS) ||
inheritsFrom(child, IC_EVEX_L_W_XS);
case IC_EVEX_XD:
return inheritsFrom(child, IC_EVEX_W_XD) ||
inheritsFrom(child, IC_EVEX_L_W_XD);
case IC_EVEX_OPSIZE:
return inheritsFrom(child, IC_EVEX_W_OPSIZE) ||
inheritsFrom(child, IC_EVEX_L_W_OPSIZE);
case IC_EVEX_B:
return false;
case IC_EVEX_W:
case IC_EVEX_W_XS:
case IC_EVEX_W_XD:
case IC_EVEX_W_OPSIZE:
return false;
case IC_EVEX_L:
case IC_EVEX_L_K_B:
case IC_EVEX_L_KZ_B:
case IC_EVEX_L_B:
case IC_EVEX_L_XS:
case IC_EVEX_L_XD:
case IC_EVEX_L_OPSIZE:
return false;
case IC_EVEX_L_W:
case IC_EVEX_L_W_XS:
case IC_EVEX_L_W_XD:
case IC_EVEX_L_W_OPSIZE:
return false;
case IC_EVEX_L2:
case IC_EVEX_L2_XS:
case IC_EVEX_L2_XD:
case IC_EVEX_L2_OPSIZE:
return false;
case IC_EVEX_L2_W:
case IC_EVEX_L2_W_XS:
case IC_EVEX_L2_W_XD:
case IC_EVEX_L2_W_OPSIZE:
return false;
case IC_EVEX_K:
return inheritsFrom(child, IC_EVEX_W_K) ||
inheritsFrom(child, IC_EVEX_L_W_K);
case IC_EVEX_XS_K:
case IC_EVEX_XS_K_B:
case IC_EVEX_XS_KZ_B:
return inheritsFrom(child, IC_EVEX_W_XS_K) ||
inheritsFrom(child, IC_EVEX_L_W_XS_K);
case IC_EVEX_XD_K:
case IC_EVEX_XD_K_B:
case IC_EVEX_XD_KZ_B:
return inheritsFrom(child, IC_EVEX_W_XD_K) ||
inheritsFrom(child, IC_EVEX_L_W_XD_K);
case IC_EVEX_XS_B:
case IC_EVEX_XD_B:
case IC_EVEX_K_B:
case IC_EVEX_KZ:
return false;
case IC_EVEX_XS_KZ:
return inheritsFrom(child, IC_EVEX_W_XS_KZ) ||
inheritsFrom(child, IC_EVEX_L_W_XS_KZ);
case IC_EVEX_XD_KZ:
return inheritsFrom(child, IC_EVEX_W_XD_KZ) ||
inheritsFrom(child, IC_EVEX_L_W_XD_KZ);
case IC_EVEX_KZ_B:
case IC_EVEX_OPSIZE_K:
case IC_EVEX_OPSIZE_B:
case IC_EVEX_OPSIZE_K_B:
case IC_EVEX_OPSIZE_KZ:
case IC_EVEX_OPSIZE_KZ_B:
return false;
case IC_EVEX_W_K:
case IC_EVEX_W_B:
case IC_EVEX_W_K_B:
case IC_EVEX_W_KZ_B:
case IC_EVEX_W_XS_K:
case IC_EVEX_W_XD_K:
case IC_EVEX_W_OPSIZE_K:
case IC_EVEX_W_OPSIZE_B:
case IC_EVEX_W_OPSIZE_K_B:
return false;
case IC_EVEX_L_K:
case IC_EVEX_L_XS_K:
case IC_EVEX_L_XD_K:
case IC_EVEX_L_XD_B:
case IC_EVEX_L_XD_K_B:
case IC_EVEX_L_OPSIZE_K:
case IC_EVEX_L_OPSIZE_B:
case IC_EVEX_L_OPSIZE_K_B:
return false;
case IC_EVEX_W_KZ:
case IC_EVEX_W_XS_KZ:
case IC_EVEX_W_XD_KZ:
case IC_EVEX_W_XS_B:
case IC_EVEX_W_XD_B:
case IC_EVEX_W_XS_K_B:
case IC_EVEX_W_XD_K_B:
case IC_EVEX_W_XS_KZ_B:
case IC_EVEX_W_XD_KZ_B:
case IC_EVEX_W_OPSIZE_KZ:
case IC_EVEX_W_OPSIZE_KZ_B:
return false;
case IC_EVEX_L_KZ:
case IC_EVEX_L_XS_KZ:
case IC_EVEX_L_XS_B:
case IC_EVEX_L_XS_K_B:
case IC_EVEX_L_XS_KZ_B:
case IC_EVEX_L_XD_KZ:
case IC_EVEX_L_XD_KZ_B:
case IC_EVEX_L_OPSIZE_KZ:
case IC_EVEX_L_OPSIZE_KZ_B:
return false;
case IC_EVEX_L_W_K:
case IC_EVEX_L_W_B:
case IC_EVEX_L_W_K_B:
case IC_EVEX_L_W_XS_K:
case IC_EVEX_L_W_XS_B:
case IC_EVEX_L_W_XS_K_B:
case IC_EVEX_L_W_XS_KZ:
case IC_EVEX_L_W_XS_KZ_B:
case IC_EVEX_L_W_OPSIZE_K:
case IC_EVEX_L_W_OPSIZE_B:
case IC_EVEX_L_W_OPSIZE_K_B:
case IC_EVEX_L_W_KZ:
case IC_EVEX_L_W_KZ_B:
case IC_EVEX_L_W_XD_K:
case IC_EVEX_L_W_XD_B:
case IC_EVEX_L_W_XD_K_B:
case IC_EVEX_L_W_XD_KZ:
case IC_EVEX_L_W_XD_KZ_B:
case IC_EVEX_L_W_OPSIZE_KZ:
case IC_EVEX_L_W_OPSIZE_KZ_B:
return false;
case IC_EVEX_L2_K:
case IC_EVEX_L2_B:
case IC_EVEX_L2_K_B:
case IC_EVEX_L2_KZ_B:
case IC_EVEX_L2_XS_K:
case IC_EVEX_L2_XS_K_B:
case IC_EVEX_L2_XS_B:
case IC_EVEX_L2_XD_B:
case IC_EVEX_L2_XD_K:
case IC_EVEX_L2_XD_K_B:
case IC_EVEX_L2_OPSIZE_K:
case IC_EVEX_L2_OPSIZE_B:
case IC_EVEX_L2_OPSIZE_K_B:
case IC_EVEX_L2_KZ:
case IC_EVEX_L2_XS_KZ:
case IC_EVEX_L2_XS_KZ_B:
case IC_EVEX_L2_XD_KZ:
case IC_EVEX_L2_XD_KZ_B:
case IC_EVEX_L2_OPSIZE_KZ:
case IC_EVEX_L2_OPSIZE_KZ_B:
return false;
case IC_EVEX_L2_W_K:
case IC_EVEX_L2_W_B:
case IC_EVEX_L2_W_K_B:
case IC_EVEX_L2_W_KZ_B:
case IC_EVEX_L2_W_XS_K:
case IC_EVEX_L2_W_XS_B:
case IC_EVEX_L2_W_XS_K_B:
case IC_EVEX_L2_W_XD_K:
case IC_EVEX_L2_W_XD_B:
case IC_EVEX_L2_W_OPSIZE_K:
case IC_EVEX_L2_W_OPSIZE_B:
case IC_EVEX_L2_W_OPSIZE_K_B:
case IC_EVEX_L2_W_KZ:
case IC_EVEX_L2_W_XS_KZ:
case IC_EVEX_L2_W_XS_KZ_B:
case IC_EVEX_L2_W_XD_KZ:
case IC_EVEX_L2_W_XD_K_B:
case IC_EVEX_L2_W_XD_KZ_B:
case IC_EVEX_L2_W_OPSIZE_KZ:
case IC_EVEX_L2_W_OPSIZE_KZ_B:
return false;
default:
errs() << "Unknown instruction class: " <<
stringForContext((InstructionContext)parent) << "\n";
llvm_unreachable("Unknown instruction class");
}
}
/// outranks - Indicates which of two applicable classes should be preferred
/// when performing a decode. This imposes a total ordering (ties are resolved
/// toward "lower").
///
/// @param upper - The class that may be preferable
/// @param lower - The class that may be less preferable
/// @return - True if upper is to be preferred, false otherwise.
static inline bool outranks(InstructionContext upper,
InstructionContext lower) {
assert(upper < IC_max);
assert(lower < IC_max);
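// Build a rank table indexed by InstructionContext from the rank column of
// INSTRUCTION_CONTEXTS; the higher-ranked context wins a decode conflict.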
#define ENUM_ENTRY(n, r, d) r,
#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) \
ENUM_ENTRY(n##_K_B, r, d) ENUM_ENTRY(n##_KZ_B, r, d) \
ENUM_ENTRY(n##_KZ, r, d) ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)
static int ranks[IC_max] = {
INSTRUCTION_CONTEXTS
};
#undef ENUM_ENTRY
#undef ENUM_ENTRY_K_B
return (ranks[upper] > ranks[lower]);
}
/// getDecisionType - Determines whether a ModRM decision with 256 entries can
/// be compacted by eliminating redundant information.
///
/// @param decision - The decision to be compacted.
/// @return - The most compact available representation for the decision.
static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
bool satisfiesOneEntry = true;
bool satisfiesSplitRM = true;
bool satisfiesSplitReg = true;
bool satisfiesSplitMisc = true;
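// A ModRM byte is mod[7:6] reg[5:3] rm[2:0]: (index & 0xc0) == 0xc0 selects
// the register forms (mod == 0b11), index & 0x38 isolates the reg field for
// memory forms, and index & 0xf8 keeps mod+reg for register forms.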
for (unsigned index = 0; index < 256; ++index) {
if (decision.instructionIDs[index] != decision.instructionIDs[0])
satisfiesOneEntry = false;
if (((index & 0xc0) == 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[0xc0]))
satisfiesSplitRM = false;
if (((index & 0xc0) != 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[0x00]))
satisfiesSplitRM = false;
if (((index & 0xc0) == 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[index&0xf8]))
satisfiesSplitReg = false;
if (((index & 0xc0) != 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[index&0x38]))
satisfiesSplitMisc = false;
}
if (satisfiesOneEntry)
return MODRM_ONEENTRY;
if (satisfiesSplitRM)
return MODRM_SPLITRM;
if (satisfiesSplitReg && satisfiesSplitMisc)
return MODRM_SPLITREG;
if (satisfiesSplitMisc)
return MODRM_SPLITMISC;
return MODRM_FULL;
}
/// stringForDecisionType - Returns a statically-allocated string corresponding
/// to a particular decision type.
///
/// @param dt - The decision type.
/// @return - A pointer to the statically-allocated string (e.g.,
/// "MODRM_ONEENTRY" for MODRM_ONEENTRY).
static const char* stringForDecisionType(ModRMDecisionType dt) {
#define ENUM_ENTRY(n) case n: return #n;
switch (dt) {
default:
llvm_unreachable("Unknown decision type");
MODRMTYPES
};
#undef ENUM_ENTRY
}
DisassemblerTables::DisassemblerTables() {
unsigned i;
for (i = 0; i < array_lengthof(Tables); i++) {
Tables[i] = new ContextDecision;
memset(Tables[i], 0, sizeof(ContextDecision));
}
HasConflicts = false;
}
DisassemblerTables::~DisassemblerTables() {
unsigned i;
for (i = 0; i < array_lengthof(Tables); i++)
delete Tables[i];
}
void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2,
unsigned &ModRMTableNum,
ModRMDecision &decision) const {
static uint32_t sTableNumber = 0;
static uint32_t sEntryNumber = 1;
ModRMDecisionType dt = getDecisionType(decision);
if (dt == MODRM_ONEENTRY && decision.instructionIDs[0] == 0)
{
o2.indent(i2) << "{ /* ModRMDecision */" << "\n";
i2++;
o2.indent(i2) << stringForDecisionType(dt) << "," << "\n";
o2.indent(i2) << 0 << " /* EmptyTable */\n";
i2--;
o2.indent(i2) << "}";
return;
}
std::vector<unsigned> ModRMDecision;
switch (dt) {
default:
llvm_unreachable("Unknown decision type");
case MODRM_ONEENTRY:
ModRMDecision.push_back(decision.instructionIDs[0]);
break;
case MODRM_SPLITRM:
ModRMDecision.push_back(decision.instructionIDs[0x00]);
ModRMDecision.push_back(decision.instructionIDs[0xc0]);
break;
case MODRM_SPLITREG:
for (unsigned index = 0; index < 64; index += 8)
ModRMDecision.push_back(decision.instructionIDs[index]);
for (unsigned index = 0xc0; index < 256; index += 8)
ModRMDecision.push_back(decision.instructionIDs[index]);
break;
case MODRM_SPLITMISC:
for (unsigned index = 0; index < 64; index += 8)
ModRMDecision.push_back(decision.instructionIDs[index]);
for (unsigned index = 0xc0; index < 256; ++index)
ModRMDecision.push_back(decision.instructionIDs[index]);
break;
case MODRM_FULL:
for (unsigned index = 0; index < 256; ++index)
ModRMDecision.push_back(decision.instructionIDs[index]);
break;
}
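// Reuse an existing table if an identical decision vector has already been
// emitted; otherwise append this one to the flattened modRMTable stream.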
unsigned &EntryNumber = ModRMTable[ModRMDecision];
if (EntryNumber == 0) {
EntryNumber = ModRMTableNum;
ModRMTableNum += ModRMDecision.size();
o1 << "/* Table" << EntryNumber << " */\n";
i1++;
for (std::vector<unsigned>::const_iterator I = ModRMDecision.begin(),
E = ModRMDecision.end(); I != E; ++I) {
o1.indent(i1 * 2) << format("0x%hx", *I) << ", /* "
<< InstructionSpecifiers[*I].name << " */\n";
}
i1--;
}
o2.indent(i2) << "{ /* struct ModRMDecision */" << "\n";
i2++;
o2.indent(i2) << stringForDecisionType(dt) << "," << "\n";
o2.indent(i2) << EntryNumber << " /* Table" << EntryNumber << " */\n";
i2--;
o2.indent(i2) << "}";
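// Track the total number of entries emitted so far so the uint16_t index
// assertion below remains meaningful.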
switch (dt) {
default:
llvm_unreachable("Unknown decision type");
case MODRM_ONEENTRY:
sEntryNumber += 1;
break;
case MODRM_SPLITRM:
sEntryNumber += 2;
break;
case MODRM_SPLITREG:
sEntryNumber += 16;
break;
case MODRM_SPLITMISC:
sEntryNumber += 8 + 64;
break;
case MODRM_FULL:
sEntryNumber += 256;
break;
}
// We assume that the index can fit into uint16_t.
assert(sEntryNumber < 65536U &&
"Index into ModRMDecision is too large for uint16_t!");
++sTableNumber;
}
void DisassemblerTables::emitOpcodeDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2,
unsigned &ModRMTableNum,
OpcodeDecision &decision) const {
o2.indent(i2) << "{ /* struct OpcodeDecision */" << "\n";
i2++;
o2.indent(i2) << "{" << "\n";
i2++;
for (unsigned index = 0; index < 256; ++index) {
o2.indent(i2);
o2 << "/* 0x" << format("%02hhx", index) << " */" << "\n";
emitModRMDecision(o1, o2, i1, i2, ModRMTableNum,
decision.modRMDecisions[index]);
if (index < 255)
o2 << ",";
o2 << "\n";
}
i2--;
o2.indent(i2) << "}" << "\n";
i2--;
o2.indent(i2) << "}" << "\n";
}
void DisassemblerTables::emitContextDecision(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2,
unsigned &ModRMTableNum,
ContextDecision &decision,
const char* name) const {
o2.indent(i2) << "static const struct ContextDecision " << name << " = {\n";
i2++;
o2.indent(i2) << "{ /* opcodeDecisions */" << "\n";
i2++;
for (unsigned index = 0; index < IC_max; ++index) {
o2.indent(i2) << "/* ";
o2 << stringForContext((InstructionContext)index);
o2 << " */";
o2 << "\n";
emitOpcodeDecision(o1, o2, i1, i2, ModRMTableNum,
decision.opcodeDecisions[index]);
if (index + 1 < IC_max)
o2 << ", ";
}
i2--;
o2.indent(i2) << "}" << "\n";
i2--;
o2.indent(i2) << "};" << "\n";
}
void DisassemblerTables::emitInstructionInfo(raw_ostream &o,
unsigned &i) const {
unsigned NumInstructions = InstructionSpecifiers.size();
o << "static const struct OperandSpecifier x86OperandSets[]["
<< X86_MAX_OPERANDS << "] = {\n";
typedef SmallVector<std::pair<OperandEncoding, OperandType>,
X86_MAX_OPERANDS> OperandListTy;
std::map<OperandListTy, unsigned> OperandSets;
unsigned OperandSetNum = 0;
for (unsigned Index = 0; Index < NumInstructions; ++Index) {
OperandListTy OperandList;
for (unsigned OperandIndex = 0; OperandIndex < X86_MAX_OPERANDS;
++OperandIndex) {
OperandEncoding Encoding = (OperandEncoding)InstructionSpecifiers[Index]
.operands[OperandIndex].encoding;
OperandType Type = (OperandType)InstructionSpecifiers[Index]
.operands[OperandIndex].type;
OperandList.push_back(std::make_pair(Encoding, Type));
}
unsigned &N = OperandSets[OperandList];
if (N != 0) continue;
N = ++OperandSetNum;
o << " { /* " << (OperandSetNum - 1) << " */\n";
for (unsigned i = 0, e = OperandList.size(); i != e; ++i) {
const char *Encoding = stringForOperandEncoding(OperandList[i].first);
const char *Type = stringForOperandType(OperandList[i].second);
o << " { " << Encoding << ", " << Type << " },\n";
}
o << " },\n";
}
o << "};" << "\n\n";
o.indent(i * 2) << "static const struct InstructionSpecifier ";
o << INSTRUCTIONS_STR "[" << InstructionSpecifiers.size() << "] = {\n";
i++;
for (unsigned index = 0; index < NumInstructions; ++index) {
o.indent(i * 2) << "{ /* " << index << " */\n";
i++;
OperandListTy OperandList;
for (unsigned OperandIndex = 0; OperandIndex < X86_MAX_OPERANDS;
++OperandIndex) {
OperandEncoding Encoding = (OperandEncoding)InstructionSpecifiers[index]
.operands[OperandIndex].encoding;
OperandType Type = (OperandType)InstructionSpecifiers[index]
.operands[OperandIndex].type;
OperandList.push_back(std::make_pair(Encoding, Type));
}
o.indent(i * 2) << (OperandSets[OperandList] - 1) << ",\n";
o.indent(i * 2) << "/* " << InstructionSpecifiers[index].name << " */\n";
i--;
o.indent(i * 2) << "},\n";
}
i--;
o.indent(i * 2) << "};" << "\n";
}
void DisassemblerTables::emitContextTable(raw_ostream &o, unsigned &i) const {
const unsigned int tableSize = 16384;
o.indent(i * 2) << "static const uint8_t " CONTEXTS_STR
"[" << tableSize << "] = {\n";
i++;
for (unsigned index = 0; index < tableSize; ++index) {
o.indent(i * 2);
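// Map this combination of attribute bits to the most specific matching
// InstructionContext, testing the most constrained combinations first.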
if (index & ATTR_EVEX) {
o << "IC_EVEX";
if (index & ATTR_EVEXL2)
o << "_L2";
else if (index & ATTR_EVEXL)
o << "_L";
if (index & ATTR_REXW)
o << "_W";
if (index & ATTR_OPSIZE)
o << "_OPSIZE";
else if (index & ATTR_XD)
o << "_XD";
else if (index & ATTR_XS)
o << "_XS";
if (index & ATTR_EVEXKZ)
o << "_KZ";
else if (index & ATTR_EVEXK)
o << "_K";
if (index & ATTR_EVEXB)
o << "_B";
}
else if ((index & ATTR_VEXL) && (index & ATTR_REXW) && (index & ATTR_OPSIZE))
o << "IC_VEX_L_W_OPSIZE";
else if ((index & ATTR_VEXL) && (index & ATTR_REXW) && (index & ATTR_XD))
o << "IC_VEX_L_W_XD";
else if ((index & ATTR_VEXL) && (index & ATTR_REXW) && (index & ATTR_XS))
o << "IC_VEX_L_W_XS";
else if ((index & ATTR_VEXL) && (index & ATTR_REXW))
o << "IC_VEX_L_W";
else if ((index & ATTR_VEXL) && (index & ATTR_OPSIZE))
o << "IC_VEX_L_OPSIZE";
else if ((index & ATTR_VEXL) && (index & ATTR_XD))
o << "IC_VEX_L_XD";
else if ((index & ATTR_VEXL) && (index & ATTR_XS))
o << "IC_VEX_L_XS";
else if ((index & ATTR_VEX) && (index & ATTR_REXW) && (index & ATTR_OPSIZE))
o << "IC_VEX_W_OPSIZE";
else if ((index & ATTR_VEX) && (index & ATTR_REXW) && (index & ATTR_XD))
o << "IC_VEX_W_XD";
else if ((index & ATTR_VEX) && (index & ATTR_REXW) && (index & ATTR_XS))
o << "IC_VEX_W_XS";
else if (index & ATTR_VEXL)
o << "IC_VEX_L";
else if ((index & ATTR_VEX) && (index & ATTR_REXW))
o << "IC_VEX_W";
else if ((index & ATTR_VEX) && (index & ATTR_OPSIZE))
o << "IC_VEX_OPSIZE";
else if ((index & ATTR_VEX) && (index & ATTR_XD))
o << "IC_VEX_XD";
else if ((index & ATTR_VEX) && (index & ATTR_XS))
o << "IC_VEX_XS";
else if (index & ATTR_VEX)
o << "IC_VEX";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XS))
o << "IC_64BIT_REXW_XS";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XD))
o << "IC_64BIT_REXW_XD";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
(index & ATTR_OPSIZE))
o << "IC_64BIT_REXW_OPSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
(index & ATTR_ADSIZE))
o << "IC_64BIT_REXW_ADSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_XD) && (index & ATTR_OPSIZE))
o << "IC_64BIT_XD_OPSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_XS) && (index & ATTR_OPSIZE))
o << "IC_64BIT_XS_OPSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_XS))
o << "IC_64BIT_XS";
else if ((index & ATTR_64BIT) && (index & ATTR_XD))
o << "IC_64BIT_XD";
else if ((index & ATTR_64BIT) && (index & ATTR_OPSIZE) &&
(index & ATTR_ADSIZE))
o << "IC_64BIT_OPSIZE_ADSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_OPSIZE))
o << "IC_64BIT_OPSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_ADSIZE))
o << "IC_64BIT_ADSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW))
o << "IC_64BIT_REXW";
else if ((index & ATTR_64BIT))
o << "IC_64BIT";
else if ((index & ATTR_XS) && (index & ATTR_OPSIZE))
o << "IC_XS_OPSIZE";
else if ((index & ATTR_XD) && (index & ATTR_OPSIZE))
o << "IC_XD_OPSIZE";
else if (index & ATTR_XS)
o << "IC_XS";
else if (index & ATTR_XD)
o << "IC_XD";
else if ((index & ATTR_OPSIZE) && (index & ATTR_ADSIZE))
o << "IC_OPSIZE_ADSIZE";
else if (index & ATTR_OPSIZE)
o << "IC_OPSIZE";
else if (index & ATTR_ADSIZE)
o << "IC_ADSIZE";
else
o << "IC";
if (index < tableSize - 1)
o << ",";
else
o << " ";
o << " /* " << index << " */";
o << "\n";
}
i--;
o.indent(i * 2) << "};" << "\n";
}
void DisassemblerTables::emitContextDecisions(raw_ostream &o1, raw_ostream &o2,
unsigned &i1, unsigned &i2,
unsigned &ModRMTableNum) const {
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[0], ONEBYTE_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[1], TWOBYTE_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[2], THREEBYTE38_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[3], THREEBYTE3A_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[4], XOP8_MAP_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[5], XOP9_MAP_STR);
emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[6], XOPA_MAP_STR);
}
void DisassemblerTables::emit(raw_ostream &o) const {
unsigned i1 = 0;
unsigned i2 = 0;
std::string s1;
std::string s2;
raw_string_ostream o1(s1);
raw_string_ostream o2(s2);
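// The flattened ModRM tables (o1) and the ContextDecision structures that
// index into them (o2) are built into separate string streams so the tables
// can be printed ahead of the decisions that reference them.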
emitInstructionInfo(o, i2);
o << "\n";
emitContextTable(o, i2);
o << "\n";
unsigned ModRMTableNum = 0;
o << "static const InstrUID modRMTable[] = {\n";
i1++;
std::vector<unsigned> EmptyTable(1, 0);
ModRMTable[EmptyTable] = ModRMTableNum;
ModRMTableNum += EmptyTable.size();
o1 << "/* EmptyTable */\n";
o1.indent(i1 * 2) << "0x0,\n";
i1--;
emitContextDecisions(o1, o2, i1, i2, ModRMTableNum);
o << o1.str();
o << " 0x0\n";
o << "};\n";
o << "\n";
o << o2.str();
o << "\n";
o << "\n";
}
void DisassemblerTables::setTableFields(ModRMDecision &decision,
const ModRMFilter &filter,
InstrUID uid,
uint8_t opcode) {
for (unsigned index = 0; index < 256; ++index) {
if (filter.accepts(index)) {
if (decision.instructionIDs[index] == uid)
continue;
if (decision.instructionIDs[index] != 0) {
InstructionSpecifier &newInfo =
InstructionSpecifiers[uid];
InstructionSpecifier &previousInfo =
InstructionSpecifiers[decision.instructionIDs[index]];
if (previousInfo.name == "NOOP" && (newInfo.name == "XCHG16ar" ||
newInfo.name == "XCHG32ar" ||
newInfo.name == "XCHG32ar64" ||
newInfo.name == "XCHG64ar"))
continue; // special case for XCHG*ar and NOOP
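// Keep the existing entry if its context outranks the new one; otherwise
// the new instruction overwrites it below.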
if (outranks(previousInfo.insnContext, newInfo.insnContext))
continue;
if (previousInfo.insnContext == newInfo.insnContext) {
errs() << "Error: Primary decode conflict: ";
errs() << newInfo.name << " would overwrite " << previousInfo.name;
errs() << "\n";
errs() << "ModRM " << index << "\n";
errs() << "Opcode " << (uint16_t)opcode << "\n";
errs() << "Context " << stringForContext(newInfo.insnContext) << "\n";
HasConflicts = true;
}
}
decision.instructionIDs[index] = uid;
}
}
}
void DisassemblerTables::setTableFields(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
const ModRMFilter &filter,
InstrUID uid,
bool is32bit,
bool ignoresVEX_L,
unsigned addressSize) {
ContextDecision &decision = *Tables[type];
for (unsigned index = 0; index < IC_max; ++index) {
if ((is32bit || addressSize == 16) &&
inheritsFrom((InstructionContext)index, IC_64BIT))
continue;
bool adSize64 = addressSize == 64;
if (inheritsFrom((InstructionContext)index,
InstructionSpecifiers[uid].insnContext, ignoresVEX_L,
adSize64))
setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
filter,
uid,
opcode);
}
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenIntrinsics.h | //===- CodeGenIntrinsic.h - Intrinsic Class Wrapper ------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper class for the 'Intrinsic' TableGen class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_CODEGENINTRINSICS_H
#define LLVM_UTILS_TABLEGEN_CODEGENINTRINSICS_H
#include "llvm/CodeGen/MachineValueType.h"
#include <string>
#include <vector>
namespace llvm {
class Record;
class RecordKeeper;
class CodeGenTarget;
struct CodeGenIntrinsic {
Record *TheDef; // The actual record defining this intrinsic.
std::string Name; // The name of the LLVM function "llvm.bswap.i32"
std::string EnumName; // The name of the enum "bswap_i32"
std::string GCCBuiltinName;// Name of the corresponding GCC builtin, or "".
std::string MSBuiltinName; // Name of the corresponding MS builtin, or "".
std::string TargetPrefix;  // Target prefix, e.g. "ppc" for target-specific intrinsics.
/// IntrinsicSignature - This structure holds the return values and
/// parameter values of an intrinsic. If the number of return values is > 1,
/// then the intrinsic implicitly returns a first-class aggregate. The
/// numbering of the types starts at 0 with the first return value and
/// continues from there through the parameter list. This is useful for
/// "matching" types.
struct IntrinsicSignature {
/// RetVTs - The MVT::SimpleValueType for each return type. Note that this
/// list is only populated when in the context of a target .td file. When
/// building Intrinsics.td, this isn't available, because we don't know
/// the target pointer size.
std::vector<MVT::SimpleValueType> RetVTs;
/// RetTypeDefs - The records for each return type.
std::vector<Record*> RetTypeDefs;
/// ParamVTs - The MVT::SimpleValueType for each parameter type. Note that
/// this list is only populated when in the context of a target .td file.
/// When building Intrinsics.td, this isn't available, because we don't
/// know the target pointer size.
std::vector<MVT::SimpleValueType> ParamVTs;
/// ParamTypeDefs - The records for each parameter type.
std::vector<Record*> ParamTypeDefs;
};
IntrinsicSignature IS;
// Memory mod/ref behavior of this intrinsic.
enum {
NoMem, ReadArgMem, ReadMem, ReadWriteArgMem, ReadWriteMem
} ModRef;
/// This is set to true if the intrinsic is overloaded by its argument
/// types.
bool isOverloaded;
/// isCommutative - True if the intrinsic is commutative.
bool isCommutative;
/// canThrow - True if the intrinsic can throw.
bool canThrow;
/// isNoDuplicate - True if the intrinsic is marked as noduplicate.
bool isNoDuplicate;
/// isNoReturn - True if the intrinsic is no-return.
bool isNoReturn;
/// isConvergent - True if the intrinsic is marked as convergent.
bool isConvergent;
enum ArgAttribute {
NoCapture,
ReadOnly,
ReadNone
};
std::vector<std::pair<unsigned, ArgAttribute> > ArgumentAttributes;
CodeGenIntrinsic(Record *R);
};
/// LoadIntrinsics - Read all of the intrinsics defined in the specified
/// .td file.
std::vector<CodeGenIntrinsic> LoadIntrinsics(const RecordKeeper &RC,
bool TargetOnly);
}
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenDAGPatterns.cpp | //===- CodeGenDAGPatterns.cpp - Read DAG patterns from .td file -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CodeGenDAGPatterns class, which is used to read and
// represent the patterns present in a .td file for instructions.
//
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <algorithm>
#include <cstdio>
#include <set>
using namespace llvm;
#define DEBUG_TYPE "dag-patterns"
//===----------------------------------------------------------------------===//
// EEVT::TypeSet Implementation
//===----------------------------------------------------------------------===//
static inline bool isInteger(MVT::SimpleValueType VT) {
return MVT(VT).isInteger();
}
static inline bool isFloatingPoint(MVT::SimpleValueType VT) {
return MVT(VT).isFloatingPoint();
}
static inline bool isVector(MVT::SimpleValueType VT) {
return MVT(VT).isVector();
}
static inline bool isScalar(MVT::SimpleValueType VT) {
return !MVT(VT).isVector();
}
EEVT::TypeSet::TypeSet(MVT::SimpleValueType VT, TreePattern &TP) {
if (VT == MVT::iAny)
EnforceInteger(TP);
else if (VT == MVT::fAny)
EnforceFloatingPoint(TP);
else if (VT == MVT::vAny)
EnforceVector(TP);
else {
assert((VT < MVT::LAST_VALUETYPE || VT == MVT::iPTR ||
VT == MVT::iPTRAny || VT == MVT::Any) && "Not a concrete type!");
TypeVec.push_back(VT);
}
}
EEVT::TypeSet::TypeSet(ArrayRef<MVT::SimpleValueType> VTList) {
assert(!VTList.empty() && "empty list?");
TypeVec.append(VTList.begin(), VTList.end());
if (!VTList.empty())
assert(VTList[0] != MVT::iAny && VTList[0] != MVT::vAny &&
VTList[0] != MVT::fAny);
// Verify no duplicates.
array_pod_sort(TypeVec.begin(), TypeVec.end());
assert(std::unique(TypeVec.begin(), TypeVec.end()) == TypeVec.end());
}
/// FillWithPossibleTypes - Set to all legal types and return true, only valid
/// on completely unknown type sets.
bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
bool (*Pred)(MVT::SimpleValueType),
const char *PredicateName) {
assert(isCompletelyUnknown());
ArrayRef<MVT::SimpleValueType> LegalTypes =
TP.getDAGPatterns().getTargetInfo().getLegalValueTypes();
if (TP.hasError())
return false;
for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i)
if (!Pred || Pred(LegalTypes[i]))
TypeVec.push_back(LegalTypes[i]);
// If we have nothing that matches the predicate, bail out.
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, no " +
std::string(PredicateName) + " types found");
return false;
}
// No need to sort with one element.
if (TypeVec.size() == 1) return true;
// Remove duplicates.
array_pod_sort(TypeVec.begin(), TypeVec.end());
TypeVec.erase(std::unique(TypeVec.begin(), TypeVec.end()), TypeVec.end());
return true;
}
/// hasIntegerTypes - Return true if this TypeSet contains iAny or an
/// integer value type.
bool EEVT::TypeSet::hasIntegerTypes() const {
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
if (isInteger(TypeVec[i]))
return true;
return false;
}
/// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
/// a floating point value type.
bool EEVT::TypeSet::hasFloatingPointTypes() const {
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
if (isFloatingPoint(TypeVec[i]))
return true;
return false;
}
/// hasScalarTypes - Return true if this TypeSet contains a scalar value type.
bool EEVT::TypeSet::hasScalarTypes() const {
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
if (isScalar(TypeVec[i]))
return true;
return false;
}
/// hasVectorTypes - Return true if this TypeSet contains a vAny or a vector
/// value type.
bool EEVT::TypeSet::hasVectorTypes() const {
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
if (isVector(TypeVec[i]))
return true;
return false;
}
std::string EEVT::TypeSet::getName() const {
if (TypeVec.empty()) return "<empty>";
std::string Result;
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i) {
std::string VTName = llvm::getEnumName(TypeVec[i]);
// Strip off MVT:: prefix if present.
if (VTName.substr(0,5) == "MVT::")
VTName = VTName.substr(5);
if (i) Result += ':';
Result += VTName;
}
if (TypeVec.size() == 1)
return Result;
return "{" + Result + "}";
}
/// MergeInTypeInfo - This merges in type information from the specified
/// argument. If 'this' changes, it returns true. If the two types are
/// contradictory (e.g. merge f32 into i32) then this flags an error.
bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
if (InVT.isCompletelyUnknown() || *this == InVT || TP.hasError())
return false;
if (isCompletelyUnknown()) {
*this = InVT;
return true;
}
assert(TypeVec.size() >= 1 && InVT.TypeVec.size() >= 1 && "No unknowns");
// Handle the abstract cases, seeing if we can resolve them better.
switch (TypeVec[0]) {
default: break;
case MVT::iPTR:
case MVT::iPTRAny:
if (InVT.hasIntegerTypes()) {
EEVT::TypeSet InCopy(InVT);
InCopy.EnforceInteger(TP);
InCopy.EnforceScalar(TP);
if (InCopy.isConcrete()) {
// If the RHS has a single concrete integer type, resolve iPTR to that type.
TypeVec[0] = InVT.TypeVec[0];
return true;
}
// If the input has multiple scalar integers, this doesn't add any info.
if (!InCopy.isCompletelyUnknown())
return false;
}
break;
}
// If the input constraint is iAny/iPTR and this is an integer type list,
// remove non-integer types from the list.
if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
hasIntegerTypes()) {
bool MadeChange = EnforceInteger(TP);
// If we're merging in iPTR/iPTRAny and the node currently has a list of
// multiple different integer types, replace them with a single iPTR.
if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
TypeVec.size() != 1) {
TypeVec.resize(1);
TypeVec[0] = InVT.TypeVec[0];
MadeChange = true;
}
return MadeChange;
}
// If this is a type list and the RHS is a typelist as well, eliminate entries
// from this list that aren't in the other one.
bool MadeChange = false;
TypeSet InputSet(*this);
for (unsigned i = 0; i != TypeVec.size(); ++i) {
bool InInVT = false;
for (unsigned j = 0, e = InVT.TypeVec.size(); j != e; ++j)
if (TypeVec[i] == InVT.TypeVec[j]) {
InInVT = true;
break;
}
if (InInVT) continue;
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
// If we removed all of our types, we have a type contradiction.
if (!TypeVec.empty())
return MadeChange;
// FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, merging '" +
InVT.getName() + "' into '" + InputSet.getName() + "'");
return false;
}
/// EnforceInteger - Remove all non-integer types from this set.
bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
if (TP.hasError())
return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isInteger, "integer");
if (!hasFloatingPointTypes())
return false;
TypeSet InputSet(*this);
// Filter out all the fp types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isInteger(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be integer");
return false;
}
return true;
}
/// EnforceFloatingPoint - Remove all integer types from this set.
bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
if (TP.hasError())
return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isFloatingPoint, "floating point");
if (!hasIntegerTypes())
return false;
TypeSet InputSet(*this);
// Filter out all the non-fp types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isFloatingPoint(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be floating point");
return false;
}
return true;
}
/// EnforceScalar - Remove all vector types from this.
bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
if (TP.hasError())
return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isScalar, "scalar");
if (!hasVectorTypes())
return false;
TypeSet InputSet(*this);
// Filter out all the vector types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isScalar(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be scalar");
return false;
}
return true;
}
/// EnforceVector - Remove all non-vector types from this.
bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
if (TP.hasError())
return false;
// If we know nothing, then get the full set.
if (TypeVec.empty())
return FillWithPossibleTypes(TP, isVector, "vector");
TypeSet InputSet(*this);
bool MadeChange = false;
// Filter out all the scalar types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isVector(TypeVec[i])) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be a vector");
return false;
}
return MadeChange;
}
/// EnforceSmallerThan - 'this' must be a smaller VT than Other. For vectors
/// this should be based on the element type. Update this and other based on
/// this information.
bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
if (TP.hasError())
return false;
// Both operands must be integer or FP, but we don't care which.
bool MadeChange = false;
if (isCompletelyUnknown())
MadeChange = FillWithPossibleTypes(TP);
if (Other.isCompletelyUnknown())
MadeChange = Other.FillWithPossibleTypes(TP);
// If one side is known to be integer or known to be FP but the other side has
// no information, get at least the type integrality info in there.
if (!hasFloatingPointTypes())
MadeChange |= Other.EnforceInteger(TP);
else if (!hasIntegerTypes())
MadeChange |= Other.EnforceFloatingPoint(TP);
if (!Other.hasFloatingPointTypes())
MadeChange |= EnforceInteger(TP);
else if (!Other.hasIntegerTypes())
MadeChange |= EnforceFloatingPoint(TP);
assert(!isCompletelyUnknown() && !Other.isCompletelyUnknown() &&
"Should have a type list now");
// If one contains vectors but the other doesn't, pull the vectors out.
if (!hasVectorTypes())
MadeChange |= Other.EnforceScalar(TP);
else if (!hasScalarTypes())
MadeChange |= Other.EnforceVector(TP);
if (!Other.hasVectorTypes())
MadeChange |= EnforceScalar(TP);
else if (!Other.hasScalarTypes())
MadeChange |= EnforceVector(TP);
// This code does not currently handle nodes which have multiple types,
// where some types are integer, and some are fp. Assert that this is not
// the case.
assert(!(hasIntegerTypes() && hasFloatingPointTypes()) &&
!(Other.hasIntegerTypes() && Other.hasFloatingPointTypes()) &&
"SDTCisOpSmallerThanOp does not handle mixed int/fp types!");
if (TP.hasError())
return false;
// Okay, find the smallest type from current set and remove anything the
// same or smaller from the other set. We need to ensure that the scalar
// type size is smaller than the scalar size of the smallest type. For
// vectors, we also need to make sure that the total size is no larger than
// the size of the smallest type.
TypeSet InputSet(Other);
MVT Smallest = TypeVec[0];
for (unsigned i = 0; i != Other.TypeVec.size(); ++i) {
MVT OtherVT = Other.TypeVec[i];
// Don't compare vector and non-vector types.
if (OtherVT.isVector() != Smallest.isVector())
continue;
// The getSizeInBits() check here is only needed for vectors, but is
// a subset of the scalar check for scalars so no need to qualify.
if (OtherVT.getScalarSizeInBits() <= Smallest.getScalarSizeInBits() ||
OtherVT.getSizeInBits() < Smallest.getSizeInBits()) {
Other.TypeVec.erase(Other.TypeVec.begin()+i--);
MadeChange = true;
}
}
if (Other.TypeVec.empty()) {
TP.error("Type inference contradiction found, '" + InputSet.getName() +
"' has nothing larger than '" + getName() +"'!");
return false;
}
// Okay, find the largest type from the other set and remove anything the
// same or smaller from the current set. We need to ensure that the scalar
// type size is larger than the scalar size of the largest type. For
// vectors, we also need to make sure that the total size is no smaller than
// the size of the largest type.
InputSet = TypeSet(*this);
MVT Largest = Other.TypeVec[Other.TypeVec.size()-1];
for (unsigned i = 0; i != TypeVec.size(); ++i) {
MVT OtherVT = TypeVec[i];
// Don't compare vector and non-vector types.
if (OtherVT.isVector() != Largest.isVector())
continue;
// The getSizeInBits() check here is only needed for vectors, but is
// a subset of the scalar check for scalars so no need to qualify.
if (OtherVT.getScalarSizeInBits() >= Largest.getScalarSizeInBits() ||
OtherVT.getSizeInBits() > Largest.getSizeInBits()) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
}
if (TypeVec.empty()) {
TP.error("Type inference contradiction found, '" + InputSet.getName() +
"' has nothing smaller than '" + Other.getName() +"'!");
return false;
}
return MadeChange;
}
/// EnforceVectorEltTypeIs - 'this' is now constrained to be a vector type
/// whose element type is VT.
bool EEVT::TypeSet::EnforceVectorEltTypeIs(MVT::SimpleValueType VT,
TreePattern &TP) {
bool MadeChange = false;
MadeChange |= EnforceVector(TP);
TypeSet InputSet(*this);
// Filter out all the types which don't have the right element type.
for (unsigned i = 0; i != TypeVec.size(); ++i) {
assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
if (MVT(TypeVec[i]).getVectorElementType().SimpleTy != VT) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
}
if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have a vector element");
return false;
}
return MadeChange;
}
/// EnforceVectorEltTypeIs - 'this' is now constrained to be a vector type
/// whose element type is specified by VTOperand.
bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
TreePattern &TP) {
if (TP.hasError())
return false;
// "This" must be a vector and "VTOperand" must be a scalar.
bool MadeChange = false;
MadeChange |= EnforceVector(TP);
MadeChange |= VTOperand.EnforceScalar(TP);
// If we know the vector type, it forces the scalar to agree.
if (isConcrete()) {
MVT IVT = getConcrete();
IVT = IVT.getVectorElementType();
return MadeChange |
VTOperand.MergeInTypeInfo(IVT.SimpleTy, TP);
}
// If the scalar type is known, filter out vector types whose element types
// disagree.
if (!VTOperand.isConcrete())
return MadeChange;
MVT::SimpleValueType VT = VTOperand.getConcrete();
TypeSet InputSet(*this);
// Filter out all the types which don't have the right element type.
for (unsigned i = 0; i != TypeVec.size(); ++i) {
assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
if (MVT(TypeVec[i]).getVectorElementType().SimpleTy != VT) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
}
if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have a vector element");
return false;
}
return MadeChange;
}
/// EnforceVectorSubVectorTypeIs - 'this' is now constrained to be a vector
/// type that has VTOperand as a subvector (same element type, more elements).
bool EEVT::TypeSet::EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VTOperand,
TreePattern &TP) {
if (TP.hasError())
return false;
// "This" must be a vector and "VTOperand" must be a vector.
bool MadeChange = false;
MadeChange |= EnforceVector(TP);
MadeChange |= VTOperand.EnforceVector(TP);
// If one side is known to be integer or known to be FP but the other side has
// no information, get at least the type integrality info in there.
if (!hasFloatingPointTypes())
MadeChange |= VTOperand.EnforceInteger(TP);
else if (!hasIntegerTypes())
MadeChange |= VTOperand.EnforceFloatingPoint(TP);
if (!VTOperand.hasFloatingPointTypes())
MadeChange |= EnforceInteger(TP);
else if (!VTOperand.hasIntegerTypes())
MadeChange |= EnforceFloatingPoint(TP);
assert(!isCompletelyUnknown() && !VTOperand.isCompletelyUnknown() &&
"Should have a type list now");
// If we know the vector type, it forces the scalar types to agree.
// Also force one vector to have more elements than the other.
if (isConcrete()) {
MVT IVT = getConcrete();
unsigned NumElems = IVT.getVectorNumElements();
IVT = IVT.getVectorElementType();
EEVT::TypeSet EltTypeSet(IVT.SimpleTy, TP);
MadeChange |= VTOperand.EnforceVectorEltTypeIs(EltTypeSet, TP);
// Only keep types in VTOperand that have fewer elements than 'this'.
TypeSet InputSet(VTOperand);
for (unsigned i = 0; i != VTOperand.TypeVec.size(); ++i) {
assert(isVector(VTOperand.TypeVec[i]) && "EnforceVector didn't work");
if (MVT(VTOperand.TypeVec[i]).getVectorNumElements() >= NumElems) {
VTOperand.TypeVec.erase(VTOperand.TypeVec.begin()+i--);
MadeChange = true;
}
}
if (VTOperand.TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have less vector elements than '" +
getName() + "'");
return false;
}
} else if (VTOperand.isConcrete()) {
MVT IVT = VTOperand.getConcrete();
unsigned NumElems = IVT.getVectorNumElements();
IVT = IVT.getVectorElementType();
EEVT::TypeSet EltTypeSet(IVT.SimpleTy, TP);
MadeChange |= EnforceVectorEltTypeIs(EltTypeSet, TP);
// Only keep types in 'this' that have more elements than VTOperand.
TypeSet InputSet(*this);
for (unsigned i = 0; i != TypeVec.size(); ++i) {
assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
if (MVT(TypeVec[i]).getVectorNumElements() <= NumElems) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
}
if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have more vector elements than '" +
VTOperand.getName() + "'");
return false;
}
}
return MadeChange;
}
/// EnforceVectorSameNumElts - 'this' is now constrained to be a vector with
/// the same number of elements as VTOperand.
bool EEVT::TypeSet::EnforceVectorSameNumElts(EEVT::TypeSet &VTOperand,
TreePattern &TP) {
if (TP.hasError())
return false;
// "This" must be a vector and "VTOperand" must be a vector.
bool MadeChange = false;
MadeChange |= EnforceVector(TP);
MadeChange |= VTOperand.EnforceVector(TP);
// If we know one of the vector types, it forces the other type to agree.
if (isConcrete()) {
MVT IVT = getConcrete();
unsigned NumElems = IVT.getVectorNumElements();
// Only keep types in VTOperand that have the same number of elements as 'this'.
TypeSet InputSet(VTOperand);
for (unsigned i = 0; i != VTOperand.TypeVec.size(); ++i) {
assert(isVector(VTOperand.TypeVec[i]) && "EnforceVector didn't work");
if (MVT(VTOperand.TypeVec[i]).getVectorNumElements() != NumElems) {
VTOperand.TypeVec.erase(VTOperand.TypeVec.begin()+i--);
MadeChange = true;
}
}
if (VTOperand.TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have same number elements as '" +
getName() + "'");
return false;
}
} else if (VTOperand.isConcrete()) {
MVT IVT = VTOperand.getConcrete();
unsigned NumElems = IVT.getVectorNumElements();
// Only keep 'this' types that have the same number of elements as VTOperand.
TypeSet InputSet(*this);
for (unsigned i = 0; i != TypeVec.size(); ++i) {
assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
if (MVT(TypeVec[i]).getVectorNumElements() != NumElems) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
}
if (TypeVec.empty()) { // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have same number elements than '" +
VTOperand.getName() + "'");
return false;
}
}
return MadeChange;
}
//===----------------------------------------------------------------------===//
// Helpers for working with extended types.
/// Dependent variable map for CodeGenDAGPattern variant generation
typedef std::map<std::string, int> DepVarMap;
/// Const iterator shorthand for DepVarMap
typedef DepVarMap::const_iterator DepVarMap_citer;
static void FindDepVarsOf(TreePatternNode *N, DepVarMap &DepMap) {
if (N->isLeaf()) {
if (isa<DefInit>(N->getLeafValue()))
DepMap[N->getName()]++;
} else {
for (size_t i = 0, e = N->getNumChildren(); i != e; ++i)
FindDepVarsOf(N->getChild(i), DepMap);
}
}
/// Find dependent variables within child patterns
static void FindDepVars(TreePatternNode *N, MultipleUseVarSet &DepVars) {
DepVarMap depcounts;
FindDepVarsOf(N, depcounts);
for (DepVarMap_citer i = depcounts.begin(); i != depcounts.end(); ++i) {
if (i->second > 1) // std::pair<std::string, int>
DepVars.insert(i->first);
}
}
#ifndef NDEBUG
/// Dump the dependent variable set:
static void DumpDepVars(MultipleUseVarSet &DepVars) {
if (DepVars.empty()) {
DEBUG(errs() << "<empty set>");
} else {
DEBUG(errs() << "[ ");
for (MultipleUseVarSet::const_iterator i = DepVars.begin(),
e = DepVars.end(); i != e; ++i) {
DEBUG(errs() << (*i) << " ");
}
DEBUG(errs() << "]");
}
}
#endif
//===----------------------------------------------------------------------===//
// TreePredicateFn Implementation
//===----------------------------------------------------------------------===//
/// TreePredicateFn constructor. Here 'N' is a subclass of PatFrag.
TreePredicateFn::TreePredicateFn(TreePattern *N) : PatFragRec(N) {
assert((getPredCode().empty() || getImmCode().empty()) &&
".td file corrupt: can't have a node predicate *and* an imm predicate");
}
std::string TreePredicateFn::getPredCode() const {
return PatFragRec->getRecord()->getValueAsString("PredicateCode");
}
std::string TreePredicateFn::getImmCode() const {
return PatFragRec->getRecord()->getValueAsString("ImmediateCode");
}
/// isAlwaysTrue - Return true if this is a noop predicate.
bool TreePredicateFn::isAlwaysTrue() const {
return getPredCode().empty() && getImmCode().empty();
}
/// Return the name to use in the generated code to reference this predicate;
/// this is "Predicate_foo" for a pattern fragment named "foo".
std::string TreePredicateFn::getFnName() const {
return "Predicate_" + PatFragRec->getRecord()->getName();
}
/// getCodeToRunOnSDNode - Return the code for the function body that
/// evaluates this predicate. The argument is expected to be in "Node",
/// not N. This handles casting and conversion to a concrete node type as
/// appropriate.
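/// For example, for a PatFrag whose operator's SDClass is LoadSDNode
/// (illustrative), the emitted body first casts Node to the concrete class:
///   LoadSDNode*N = cast<LoadSDNode>(Node);
/// and then appends the user-supplied predicate code.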
std::string TreePredicateFn::getCodeToRunOnSDNode() const {
// Handle immediate predicates first.
std::string ImmCode = getImmCode();
if (!ImmCode.empty()) {
std::string Result =
" int64_t Imm = cast<ConstantSDNode>(Node)->getSExtValue();\n";
return Result + ImmCode;
}
// Handle arbitrary node predicates.
assert(!getPredCode().empty() && "Don't have any predicate code!");
std::string ClassName;
if (PatFragRec->getOnlyTree()->isLeaf())
ClassName = "SDNode";
else {
Record *Op = PatFragRec->getOnlyTree()->getOperator();
ClassName = PatFragRec->getDAGPatterns().getSDNodeInfo(Op).getSDClassName();
}
std::string Result;
if (ClassName == "SDNode")
Result = " SDNode *N = Node;\n";
else
Result = " " + ClassName + "*N = cast<" + ClassName + ">(Node);\n";
return Result + getPredCode();
}
//===----------------------------------------------------------------------===//
// PatternToMatch implementation
//
/// getPatternSize - Return the 'size' of this pattern. We want to match large
/// patterns before small ones; this size feeds into the complexity metric used
/// to prioritize patterns.
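/// For example, a pattern like (add GPR:$a, 7) (illustrative operands) scores
/// 3 for the 'add' node itself plus 5 for the constant integer leaf, for a
/// total size of 8.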
static unsigned getPatternSize(const TreePatternNode *P,
const CodeGenDAGPatterns &CGP) {
unsigned Size = 3; // The node itself.
// If the root node is a ConstantSDNode, increase its size.
// e.g. (set R32:$dst, 0).
if (P->isLeaf() && isa<IntInit>(P->getLeafValue()))
Size += 2;
// FIXME: This is a hack to statically increase the priority of patterns
// which map a sub-dag to a complex pattern, e.g. favoring LEA over ADD.
// Later we can allow complexity / cost for each pattern to be (optionally)
// specified. To get best possible pattern match we'll need to dynamically
// calculate the complexity of all patterns a dag can potentially map to.
const ComplexPattern *AM = P->getComplexPatternInfo(CGP);
if (AM) {
Size += AM->getNumOperands() * 3;
// We don't want to count any children twice, so return early.
return Size;
}
// If this node has some predicate function that must match, it adds to the
// complexity of this node.
if (!P->getPredicateFns().empty())
++Size;
// Count children in the size if they are also nodes.
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = P->getChild(i);
if (!Child->isLeaf() && Child->getNumTypes() &&
Child->getType(0) != MVT::Other)
Size += getPatternSize(Child, CGP);
else if (Child->isLeaf()) {
if (isa<IntInit>(Child->getLeafValue()))
Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2).
else if (Child->getComplexPatternInfo(CGP))
Size += getPatternSize(Child, CGP);
else if (!Child->getPredicateFns().empty())
++Size;
}
}
return Size;
}
/// Compute the complexity metric for the input pattern. This roughly
/// corresponds to the number of nodes that are covered.
int PatternToMatch::
getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
return getPatternSize(getSrcPattern(), CGP) + getAddedComplexity();
}
/// getPredicateCheck - Return a single string containing all of this
/// pattern's predicates concatenated with "&&" operators.
///
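/// For example, two Predicate defs whose CondString values are
/// "Subtarget->hasFoo()" and "Subtarget->hasBar()" (hypothetical) yield
/// "(Subtarget->hasFoo()) && (Subtarget->hasBar())".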
std::string PatternToMatch::getPredicateCheck() const {
std::string PredicateCheck;
for (Init *I : Predicates->getValues()) {
if (DefInit *Pred = dyn_cast<DefInit>(I)) {
Record *Def = Pred->getDef();
if (!Def->isSubClassOf("Predicate")) {
#ifndef NDEBUG
Def->dump();
#endif
llvm_unreachable("Unknown predicate type!");
}
if (!PredicateCheck.empty())
PredicateCheck += " && ";
PredicateCheck += "(" + Def->getValueAsString("CondString") + ")";
}
}
return PredicateCheck;
}
//===----------------------------------------------------------------------===//
// SDTypeConstraint implementation
//
SDTypeConstraint::SDTypeConstraint(Record *R) {
OperandNo = R->getValueAsInt("OperandNum");
if (R->isSubClassOf("SDTCisVT")) {
ConstraintType = SDTCisVT;
x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
if (x.SDTCisVT_Info.VT == MVT::isVoid)
PrintFatalError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
} else if (R->isSubClassOf("SDTCisPtrTy")) {
ConstraintType = SDTCisPtrTy;
} else if (R->isSubClassOf("SDTCisInt")) {
ConstraintType = SDTCisInt;
} else if (R->isSubClassOf("SDTCisFP")) {
ConstraintType = SDTCisFP;
} else if (R->isSubClassOf("SDTCisVec")) {
ConstraintType = SDTCisVec;
} else if (R->isSubClassOf("SDTCisSameAs")) {
ConstraintType = SDTCisSameAs;
x.SDTCisSameAs_Info.OtherOperandNum = R->getValueAsInt("OtherOperandNum");
} else if (R->isSubClassOf("SDTCisVTSmallerThanOp")) {
ConstraintType = SDTCisVTSmallerThanOp;
x.SDTCisVTSmallerThanOp_Info.OtherOperandNum =
R->getValueAsInt("OtherOperandNum");
} else if (R->isSubClassOf("SDTCisOpSmallerThanOp")) {
ConstraintType = SDTCisOpSmallerThanOp;
x.SDTCisOpSmallerThanOp_Info.BigOperandNum =
R->getValueAsInt("BigOperandNum");
} else if (R->isSubClassOf("SDTCisEltOfVec")) {
ConstraintType = SDTCisEltOfVec;
x.SDTCisEltOfVec_Info.OtherOperandNum = R->getValueAsInt("OtherOpNum");
} else if (R->isSubClassOf("SDTCisSubVecOfVec")) {
ConstraintType = SDTCisSubVecOfVec;
x.SDTCisSubVecOfVec_Info.OtherOperandNum =
R->getValueAsInt("OtherOpNum");
} else if (R->isSubClassOf("SDTCVecEltisVT")) {
ConstraintType = SDTCVecEltisVT;
x.SDTCVecEltisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
if (MVT(x.SDTCVecEltisVT_Info.VT).isVector())
PrintFatalError(R->getLoc(), "Cannot use vector type as SDTCVecEltisVT");
if (!MVT(x.SDTCVecEltisVT_Info.VT).isInteger() &&
!MVT(x.SDTCVecEltisVT_Info.VT).isFloatingPoint())
PrintFatalError(R->getLoc(), "Must use integer or floating point type "
"as SDTCVecEltisVT");
} else if (R->isSubClassOf("SDTCisSameNumEltsAs")) {
ConstraintType = SDTCisSameNumEltsAs;
x.SDTCisSameNumEltsAs_Info.OtherOperandNum =
R->getValueAsInt("OtherOperandNum");
} else {
PrintFatalError("Unrecognized SDTypeConstraint '" + R->getName() + "'!\n");
}
}
/// getOperandNum - Return the node corresponding to operand #OpNo in tree
/// N, and the result number in ResNo.
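/// Operand numbers count results first: for a node with one result, operand
/// #0 refers to that result and operand #1 refers to the node's first child.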
static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
const SDNodeInfo &NodeInfo,
unsigned &ResNo) {
unsigned NumResults = NodeInfo.getNumResults();
if (OpNo < NumResults) {
ResNo = OpNo;
return N;
}
OpNo -= NumResults;
if (OpNo >= N->getNumChildren()) {
std::string S;
raw_string_ostream OS(S);
OS << "Invalid operand number in type constraint "
<< (OpNo+NumResults) << " ";
N->print(OS);
PrintFatalError(OS.str());
}
return N->getChild(OpNo);
}
/// ApplyTypeConstraint - Given a node in a pattern, apply this type
/// constraint to the nodes operands. This returns true if it makes a
/// change, false otherwise. If a type contradiction is found, flag an error.
bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
const SDNodeInfo &NodeInfo,
TreePattern &TP) const {
if (TP.hasError())
return false;
unsigned ResNo = 0; // The result number being referenced.
TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
switch (ConstraintType) {
case SDTCisVT:
// Operand must be a particular type.
return NodeToApply->UpdateNodeType(ResNo, x.SDTCisVT_Info.VT, TP);
case SDTCisPtrTy:
// Operand must be same as target pointer type.
return NodeToApply->UpdateNodeType(ResNo, MVT::iPTR, TP);
case SDTCisInt:
// Require it to be one of the legal integer VTs.
return NodeToApply->getExtType(ResNo).EnforceInteger(TP);
case SDTCisFP:
// Require it to be one of the legal fp VTs.
return NodeToApply->getExtType(ResNo).EnforceFloatingPoint(TP);
case SDTCisVec:
// Require it to be one of the legal vector VTs.
return NodeToApply->getExtType(ResNo).EnforceVector(TP);
case SDTCisSameAs: {
unsigned OResNo = 0;
TreePatternNode *OtherNode =
getOperandNum(x.SDTCisSameAs_Info.OtherOperandNum, N, NodeInfo, OResNo);
return (int) NodeToApply->UpdateNodeType(ResNo,
OtherNode->getExtType(OResNo),TP) |
(int) OtherNode->UpdateNodeType(OResNo,
NodeToApply->getExtType(ResNo),TP);
}
case SDTCisVTSmallerThanOp: {
// The NodeToApply must be a leaf node that is a VT. OtherOperandNum must
// have an integer type that is smaller than the VT.
if (!NodeToApply->isLeaf() ||
!isa<DefInit>(NodeToApply->getLeafValue()) ||
!static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef()
->isSubClassOf("ValueType")) {
TP.error(N->getOperator()->getName() + " expects a VT operand!");
return false;
}
MVT::SimpleValueType VT =
getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
EEVT::TypeSet TypeListTmp(VT, TP);
unsigned OResNo = 0;
TreePatternNode *OtherNode =
getOperandNum(x.SDTCisVTSmallerThanOp_Info.OtherOperandNum, N, NodeInfo,
OResNo);
return TypeListTmp.EnforceSmallerThan(OtherNode->getExtType(OResNo), TP);
}
case SDTCisOpSmallerThanOp: {
unsigned BResNo = 0;
TreePatternNode *BigOperand =
getOperandNum(x.SDTCisOpSmallerThanOp_Info.BigOperandNum, N, NodeInfo,
BResNo);
return NodeToApply->getExtType(ResNo).
EnforceSmallerThan(BigOperand->getExtType(BResNo), TP);
}
case SDTCisEltOfVec: {
unsigned VResNo = 0;
TreePatternNode *VecOperand =
getOperandNum(x.SDTCisEltOfVec_Info.OtherOperandNum, N, NodeInfo,
VResNo);
// Filter vector types out of VecOperand that don't have the right element
// type.
return VecOperand->getExtType(VResNo).
EnforceVectorEltTypeIs(NodeToApply->getExtType(ResNo), TP);
}
case SDTCisSubVecOfVec: {
unsigned VResNo = 0;
TreePatternNode *BigVecOperand =
getOperandNum(x.SDTCisSubVecOfVec_Info.OtherOperandNum, N, NodeInfo,
VResNo);
// Filter vector types out of BigVecOperand that don't have the
// right subvector type.
return BigVecOperand->getExtType(VResNo).
EnforceVectorSubVectorTypeIs(NodeToApply->getExtType(ResNo), TP);
}
case SDTCVecEltisVT: {
return NodeToApply->getExtType(ResNo).
EnforceVectorEltTypeIs(x.SDTCVecEltisVT_Info.VT, TP);
}
case SDTCisSameNumEltsAs: {
unsigned OResNo = 0;
TreePatternNode *OtherNode =
getOperandNum(x.SDTCisSameNumEltsAs_Info.OtherOperandNum,
N, NodeInfo, OResNo);
return OtherNode->getExtType(OResNo).
EnforceVectorSameNumElts(NodeToApply->getExtType(ResNo), TP);
}
}
llvm_unreachable("Invalid ConstraintType!");
}
// Update the node type to match an instruction operand or result as specified
// in the ins or outs lists on the instruction definition. Return true if the
// type was actually changed.
bool TreePatternNode::UpdateNodeTypeFromInst(unsigned ResNo,
Record *Operand,
TreePattern &TP) {
// The 'unknown' operand indicates that types should be inferred from the
// context.
if (Operand->isSubClassOf("unknown_class"))
return false;
// The Operand class specifies a type directly.
if (Operand->isSubClassOf("Operand"))
return UpdateNodeType(ResNo, getValueType(Operand->getValueAsDef("Type")),
TP);
// PointerLikeRegClass has a type that is determined at runtime.
if (Operand->isSubClassOf("PointerLikeRegClass"))
return UpdateNodeType(ResNo, MVT::iPTR, TP);
// Both RegisterClass and RegisterOperand operands derive their types from a
// register class def.
Record *RC = nullptr;
if (Operand->isSubClassOf("RegisterClass"))
RC = Operand;
else if (Operand->isSubClassOf("RegisterOperand"))
RC = Operand->getValueAsDef("RegClass");
assert(RC && "Unknown operand type");
CodeGenTarget &Tgt = TP.getDAGPatterns().getTargetInfo();
return UpdateNodeType(ResNo, Tgt.getRegisterClass(RC).getValueTypes(), TP);
}
//===----------------------------------------------------------------------===//
// SDNodeInfo implementation
//
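// A typical SDNode definition parsed here looks roughly like (from
// TargetSelectionDAG.td):
//   def add : SDNode<"ISD::ADD", SDTIntBinOp,
//                    [SDNPCommutative, SDNPAssociative]>;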
SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
EnumName = R->getValueAsString("Opcode");
SDClassName = R->getValueAsString("SDClass");
Record *TypeProfile = R->getValueAsDef("TypeProfile");
NumResults = TypeProfile->getValueAsInt("NumResults");
NumOperands = TypeProfile->getValueAsInt("NumOperands");
// Parse the properties.
Properties = 0;
std::vector<Record*> PropList = R->getValueAsListOfDefs("Properties");
for (unsigned i = 0, e = PropList.size(); i != e; ++i) {
if (PropList[i]->getName() == "SDNPCommutative") {
Properties |= 1 << SDNPCommutative;
} else if (PropList[i]->getName() == "SDNPAssociative") {
Properties |= 1 << SDNPAssociative;
} else if (PropList[i]->getName() == "SDNPHasChain") {
Properties |= 1 << SDNPHasChain;
} else if (PropList[i]->getName() == "SDNPOutGlue") {
Properties |= 1 << SDNPOutGlue;
} else if (PropList[i]->getName() == "SDNPInGlue") {
Properties |= 1 << SDNPInGlue;
} else if (PropList[i]->getName() == "SDNPOptInGlue") {
Properties |= 1 << SDNPOptInGlue;
} else if (PropList[i]->getName() == "SDNPMayStore") {
Properties |= 1 << SDNPMayStore;
} else if (PropList[i]->getName() == "SDNPMayLoad") {
Properties |= 1 << SDNPMayLoad;
} else if (PropList[i]->getName() == "SDNPSideEffect") {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
} else if (PropList[i]->getName() == "SDNPVariadic") {
Properties |= 1 << SDNPVariadic;
} else {
PrintFatalError("Unknown SD Node property '" +
PropList[i]->getName() + "' on node '" +
R->getName() + "'!");
}
}
// Parse the type constraints.
std::vector<Record*> ConstraintList =
TypeProfile->getValueAsListOfDefs("Constraints");
TypeConstraints.assign(ConstraintList.begin(), ConstraintList.end());
}
/// getKnownType - If the type constraints on this node imply a fixed type
/// (e.g. all stores return void, etc), then return it as an
/// MVT::SimpleValueType. Otherwise, return MVT::Other.
MVT::SimpleValueType SDNodeInfo::getKnownType(unsigned ResNo) const {
unsigned NumResults = getNumResults();
assert(NumResults <= 1 &&
"We only work with nodes with zero or one result so far!");
assert(ResNo == 0 && "Only handles single result nodes so far");
for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i) {
// Make sure that this applies to the correct node result.
if (TypeConstraints[i].OperandNo >= NumResults) // FIXME: need value #
continue;
switch (TypeConstraints[i].ConstraintType) {
default: break;
case SDTypeConstraint::SDTCisVT:
return TypeConstraints[i].x.SDTCisVT_Info.VT;
case SDTypeConstraint::SDTCisPtrTy:
return MVT::iPTR;
}
}
return MVT::Other;
}
//===----------------------------------------------------------------------===//
// TreePatternNode implementation
//
TreePatternNode::~TreePatternNode() {
#if 0 // FIXME: implement refcounted tree nodes!
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
delete getChild(i);
#endif
}
static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
if (Operator->getName() == "set" ||
Operator->getName() == "implicit")
return 0; // All return nothing.
if (Operator->isSubClassOf("Intrinsic"))
return CDP.getIntrinsic(Operator).IS.RetVTs.size();
if (Operator->isSubClassOf("SDNode"))
return CDP.getSDNodeInfo(Operator).getNumResults();
if (Operator->isSubClassOf("PatFrag")) {
// If we've already parsed this pattern fragment, get it. Otherwise, handle
// the forward reference case where one pattern fragment references another
// before it is processed.
if (TreePattern *PFRec = CDP.getPatternFragmentIfRead(Operator))
return PFRec->getOnlyTree()->getNumTypes();
// Get the result tree.
DagInit *Tree = Operator->getValueAsDag("Fragment");
Record *Op = nullptr;
if (Tree)
if (DefInit *DI = dyn_cast<DefInit>(Tree->getOperator()))
Op = DI->getDef();
assert(Op && "Invalid Fragment");
return GetNumNodeResults(Op, CDP);
}
if (Operator->isSubClassOf("Instruction")) {
CodeGenInstruction &InstInfo = CDP.getTargetInfo().getInstruction(Operator);
unsigned NumDefsToAdd = InstInfo.Operands.NumDefs;
// Subtract any defaulted outputs.
for (unsigned i = 0; i != InstInfo.Operands.NumDefs; ++i) {
Record *OperandNode = InstInfo.Operands[i].Rec;
if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
!CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
--NumDefsToAdd;
}
// Add on one implicit def if it has a resolvable type.
if (InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo()) !=MVT::Other)
++NumDefsToAdd;
return NumDefsToAdd;
}
if (Operator->isSubClassOf("SDNodeXForm"))
return 1; // FIXME: Generalize SDNodeXForm
if (Operator->isSubClassOf("ValueType"))
return 1; // A type-cast of one result.
if (Operator->isSubClassOf("ComplexPattern"))
return 1;
Operator->dump();
PrintFatalError("Unhandled node in GetNumNodeResults");
}
void TreePatternNode::print(raw_ostream &OS) const {
if (isLeaf())
OS << *getLeafValue();
else
OS << '(' << getOperator()->getName();
for (unsigned i = 0, e = Types.size(); i != e; ++i)
OS << ':' << getExtType(i).getName();
if (!isLeaf()) {
if (getNumChildren() != 0) {
OS << " ";
getChild(0)->print(OS);
for (unsigned i = 1, e = getNumChildren(); i != e; ++i) {
OS << ", ";
getChild(i)->print(OS);
}
}
OS << ")";
}
for (unsigned i = 0, e = PredicateFns.size(); i != e; ++i)
OS << "<<P:" << PredicateFns[i].getFnName() << ">>";
if (TransformFn)
OS << "<<X:" << TransformFn->getName() << ">>";
if (!getName().empty())
OS << ":$" << getName();
}
void TreePatternNode::dump() const {
print(errs());
}
/// isIsomorphicTo - Return true if this node is recursively
/// isomorphic to the specified node. For this comparison, the node's
/// entire state is considered. The assigned name is ignored, since
/// nodes with differing names are considered isomorphic. However, if
/// the assigned name is present in the dependent variable set, then
/// the assigned name is considered significant and the node is
/// isomorphic if the names match.
bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N,
const MultipleUseVarSet &DepVars) const {
if (N == this) return true;
if (N->isLeaf() != isLeaf() || getExtTypes() != N->getExtTypes() ||
getPredicateFns() != N->getPredicateFns() ||
getTransformFn() != N->getTransformFn())
return false;
if (isLeaf()) {
if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
if (DefInit *NDI = dyn_cast<DefInit>(N->getLeafValue())) {
return ((DI->getDef() == NDI->getDef())
&& (DepVars.find(getName()) == DepVars.end()
|| getName() == N->getName()));
}
}
return getLeafValue() == N->getLeafValue();
}
if (N->getOperator() != getOperator() ||
N->getNumChildren() != getNumChildren()) return false;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
if (!getChild(i)->isIsomorphicTo(N->getChild(i), DepVars))
return false;
return true;
}
/// clone - Make a copy of this tree and all of its children.
///
TreePatternNode *TreePatternNode::clone() const {
TreePatternNode *New;
if (isLeaf()) {
New = new TreePatternNode(getLeafValue(), getNumTypes());
} else {
std::vector<TreePatternNode*> CChildren;
CChildren.reserve(Children.size());
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
CChildren.push_back(getChild(i)->clone());
New = new TreePatternNode(getOperator(), CChildren, getNumTypes());
}
New->setName(getName());
New->Types = Types;
New->setPredicateFns(getPredicateFns());
New->setTransformFn(getTransformFn());
return New;
}
/// RemoveAllTypes - Recursively strip all the types of this tree.
void TreePatternNode::RemoveAllTypes() {
for (unsigned i = 0, e = Types.size(); i != e; ++i)
Types[i] = EEVT::TypeSet(); // Reset to unknown type.
if (isLeaf()) return;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
getChild(i)->RemoveAllTypes();
}
/// SubstituteFormalArguments - Replace the formal arguments in this tree
/// with actual values specified by ArgMap.
void TreePatternNode::
SubstituteFormalArguments(std::map<std::string, TreePatternNode*> &ArgMap) {
if (isLeaf()) return;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
TreePatternNode *Child = getChild(i);
if (Child->isLeaf()) {
Init *Val = Child->getLeafValue();
// Note that, when substituting into an output pattern, Val might be an
// UnsetInit.
if (isa<UnsetInit>(Val) || (isa<DefInit>(Val) &&
cast<DefInit>(Val)->getDef()->getName() == "node")) {
// We found a use of a formal argument, replace it with its value.
TreePatternNode *NewChild = ArgMap[Child->getName()];
assert(NewChild && "Couldn't find formal argument!");
assert((Child->getPredicateFns().empty() ||
NewChild->getPredicateFns() == Child->getPredicateFns()) &&
"Non-empty child predicate clobbered!");
setChild(i, NewChild);
}
} else {
getChild(i)->SubstituteFormalArguments(ArgMap);
}
}
}
/// InlinePatternFragments - If this pattern refers to any pattern
/// fragments, inline them into place, giving us a pattern without any
/// PatFrag references.
TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
if (TP.hasError())
return nullptr;
if (isLeaf())
return this; // nothing to do.
Record *Op = getOperator();
if (!Op->isSubClassOf("PatFrag")) {
// Just recursively inline children nodes.
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
TreePatternNode *Child = getChild(i);
TreePatternNode *NewChild = Child->InlinePatternFragments(TP);
assert((Child->getPredicateFns().empty() ||
NewChild->getPredicateFns() == Child->getPredicateFns()) &&
"Non-empty child predicate clobbered!");
setChild(i, NewChild);
}
return this;
}
// Otherwise, we found a reference to a fragment. First, look up its
// TreePattern record.
TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op);
// Verify that we are passing the right number of operands.
if (Frag->getNumArgs() != Children.size()) {
TP.error("'" + Op->getName() + "' fragment requires " +
utostr(Frag->getNumArgs()) + " operands!");
return nullptr;
}
TreePatternNode *FragTree = Frag->getOnlyTree()->clone();
TreePredicateFn PredFn(Frag);
if (!PredFn.isAlwaysTrue())
FragTree->addPredicateFn(PredFn);
// Resolve formal arguments to their actual value.
if (Frag->getNumArgs()) {
// Compute the map of formal to actual arguments.
std::map<std::string, TreePatternNode*> ArgMap;
for (unsigned i = 0, e = Frag->getNumArgs(); i != e; ++i)
ArgMap[Frag->getArgName(i)] = getChild(i)->InlinePatternFragments(TP);
FragTree->SubstituteFormalArguments(ArgMap);
}
FragTree->setName(getName());
for (unsigned i = 0, e = Types.size(); i != e; ++i)
FragTree->UpdateNodeType(i, getExtType(i), TP);
// Transfer in the old predicates.
for (unsigned i = 0, e = getPredicateFns().size(); i != e; ++i)
FragTree->addPredicateFn(getPredicateFns()[i]);
// Get a new copy of this fragment to stitch into here.
//delete this; // FIXME: implement refcounting!
// The fragment we inlined could have recursive inlining that is needed. See
// if there are any pattern fragments in it and inline them as needed.
return FragTree->InlinePatternFragments(TP);
}
/// getImplicitType - Check to see if the specified record has an implicit
/// type which should be applied to it. This will infer the type of register
/// references from the register file information, for example.
///
/// When Unnamed is set, return the type of a DAG operand with no name, such as
/// the F8RC register class argument in:
///
/// (COPY_TO_REGCLASS GPR:$src, F8RC)
///
/// When Unnamed is false, return the type of a named DAG operand such as the
/// GPR:$src operand above.
///
static EEVT::TypeSet getImplicitType(Record *R, unsigned ResNo,
bool NotRegisters,
bool Unnamed,
TreePattern &TP) {
// Check to see if this is a register operand.
if (R->isSubClassOf("RegisterOperand")) {
assert(ResNo == 0 && "Regoperand ref only has one result!");
if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
Record *RegClass = R->getValueAsDef("RegClass");
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return EEVT::TypeSet(T.getRegisterClass(RegClass).getValueTypes());
}
// Check to see if this is a register or a register class.
if (R->isSubClassOf("RegisterClass")) {
assert(ResNo == 0 && "Regclass ref only has one result!");
// An unnamed register class represents itself as an i32 immediate, for
// example on a COPY_TO_REGCLASS instruction.
if (Unnamed)
return EEVT::TypeSet(MVT::i32, TP);
// In a named operand, the register class provides the possible set of
// types.
if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return EEVT::TypeSet(T.getRegisterClass(R).getValueTypes());
}
if (R->isSubClassOf("PatFrag")) {
assert(ResNo == 0 && "FIXME: PatFrag with multiple results?");
// Pattern fragment types will be resolved when they are inlined.
return EEVT::TypeSet(); // Unknown.
}
if (R->isSubClassOf("Register")) {
assert(ResNo == 0 && "Registers only produce one result!");
if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return EEVT::TypeSet(T.getRegisterVTs(R));
}
if (R->isSubClassOf("SubRegIndex")) {
assert(ResNo == 0 && "SubRegisterIndices only produce one result!");
return EEVT::TypeSet(MVT::i32, TP);
}
if (R->isSubClassOf("ValueType")) {
assert(ResNo == 0 && "This node only has one result!");
// An unnamed VTSDNode represents itself as an MVT::Other immediate.
//
// (sext_inreg GPR:$src, i16)
// ~~~
if (Unnamed)
return EEVT::TypeSet(MVT::Other, TP);
// With a name, the ValueType simply provides the type of the named
// variable.
//
// (sext_inreg i32:$src, i16)
// ~~~~~~~~
if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
return EEVT::TypeSet(getValueType(R), TP);
}
if (R->isSubClassOf("CondCode")) {
assert(ResNo == 0 && "This node only has one result!");
// Using a CondCodeSDNode.
return EEVT::TypeSet(MVT::Other, TP);
}
if (R->isSubClassOf("ComplexPattern")) {
assert(ResNo == 0 && "FIXME: ComplexPattern with multiple results?");
if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
return EEVT::TypeSet(TP.getDAGPatterns().getComplexPattern(R).getValueType(),
TP);
}
if (R->isSubClassOf("PointerLikeRegClass")) {
assert(ResNo == 0 && "Regclass can only have one result!");
return EEVT::TypeSet(MVT::iPTR, TP);
}
if (R->getName() == "node" || R->getName() == "srcvalue" ||
R->getName() == "zero_reg") {
// Placeholder.
return EEVT::TypeSet(); // Unknown.
}
if (R->isSubClassOf("Operand"))
return EEVT::TypeSet(getValueType(R->getValueAsDef("Type")));
TP.error("Unknown node flavor used in pattern: " + R->getName());
return EEVT::TypeSet(MVT::Other, TP);
}
/// getIntrinsicInfo - If this node corresponds to an intrinsic, return the
/// CodeGenIntrinsic information for it, otherwise return a null pointer.
const CodeGenIntrinsic *TreePatternNode::
getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const {
if (getOperator() != CDP.get_intrinsic_void_sdnode() &&
getOperator() != CDP.get_intrinsic_w_chain_sdnode() &&
getOperator() != CDP.get_intrinsic_wo_chain_sdnode())
return nullptr;
unsigned IID = cast<IntInit>(getChild(0)->getLeafValue())->getValue();
return &CDP.getIntrinsicInfo(IID);
}
/// getComplexPatternInfo - If this node corresponds to a ComplexPattern,
/// return the ComplexPattern information, otherwise return null.
const ComplexPattern *
TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
Record *Rec;
if (isLeaf()) {
DefInit *DI = dyn_cast<DefInit>(getLeafValue());
if (!DI)
return nullptr;
Rec = DI->getDef();
} else
Rec = getOperator();
if (!Rec->isSubClassOf("ComplexPattern"))
return nullptr;
return &CGP.getComplexPattern(Rec);
}
unsigned TreePatternNode::getNumMIResults(const CodeGenDAGPatterns &CGP) const {
// A ComplexPattern specifically declares how many results it fills in.
if (const ComplexPattern *CP = getComplexPatternInfo(CGP))
return CP->getNumOperands();
// If MIOperandInfo is specified, that gives the count.
if (isLeaf()) {
DefInit *DI = dyn_cast<DefInit>(getLeafValue());
if (DI && DI->getDef()->isSubClassOf("Operand")) {
DagInit *MIOps = DI->getDef()->getValueAsDag("MIOperandInfo");
if (MIOps->getNumArgs())
return MIOps->getNumArgs();
}
}
// Otherwise there is just one result.
return 1;
}
/// NodeHasProperty - Return true if this node has the specified property.
bool TreePatternNode::NodeHasProperty(SDNP Property,
const CodeGenDAGPatterns &CGP) const {
if (isLeaf()) {
if (const ComplexPattern *CP = getComplexPatternInfo(CGP))
return CP->hasProperty(Property);
return false;
}
Record *Operator = getOperator();
if (!Operator->isSubClassOf("SDNode")) return false;
return CGP.getSDNodeInfo(Operator).hasProperty(Property);
}
/// TreeHasProperty - Return true if any node in this tree has the specified
/// property.
bool TreePatternNode::TreeHasProperty(SDNP Property,
const CodeGenDAGPatterns &CGP) const {
if (NodeHasProperty(Property, CGP))
return true;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
if (getChild(i)->TreeHasProperty(Property, CGP))
return true;
return false;
}
/// isCommutativeIntrinsic - Return true if the node corresponds to a
/// commutative intrinsic.
bool
TreePatternNode::isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const {
if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP))
return Int->isCommutative;
return false;
}
static bool isOperandClass(const TreePatternNode *N, StringRef Class) {
if (!N->isLeaf())
return N->getOperator()->isSubClassOf(Class);
DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
if (DI && DI->getDef()->isSubClassOf(Class))
return true;
return false;
}
static void emitTooManyOperandsError(TreePattern &TP,
StringRef InstName,
unsigned Expected,
unsigned Actual) {
TP.error("Instruction '" + InstName + "' was provided " + Twine(Actual) +
" operands but expected only " + Twine(Expected) + "!");
}
static void emitTooFewOperandsError(TreePattern &TP,
StringRef InstName,
unsigned Actual) {
TP.error("Instruction '" + InstName +
"' expects more than the provided " + Twine(Actual) + " operands!");
}
/// ApplyTypeConstraints - Apply all of the type constraints relevant to
/// this node and its children in the tree. This returns true if it makes a
/// change, false otherwise. If a type contradiction is found, flag an error.
bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if (TP.hasError())
return false;
CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
if (isLeaf()) {
if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
// If it's a regclass or something else known, include the type.
bool MadeChange = false;
for (unsigned i = 0, e = Types.size(); i != e; ++i)
MadeChange |= UpdateNodeType(i, getImplicitType(DI->getDef(), i,
NotRegisters,
!hasName(), TP), TP);
return MadeChange;
}
if (IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
assert(Types.size() == 1 && "Invalid IntInit");
// Int inits are always integers. :)
bool MadeChange = Types[0].EnforceInteger(TP);
if (!Types[0].isConcrete())
return MadeChange;
MVT::SimpleValueType VT = getType(0);
if (VT == MVT::iPTR || VT == MVT::iPTRAny)
return MadeChange;
unsigned Size = MVT(VT).getSizeInBits();
// Make sure that the value is representable for this type.
if (Size >= 32) return MadeChange;
// Check that the value doesn't use more bits than we have. It must either
// be a sign- or zero-extended equivalent of the original.
int64_t SignBitAndAbove = II->getValue() >> (Size - 1);
if (SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1)
return MadeChange;
TP.error("Integer value '" + itostr(II->getValue()) +
"' is out of range for type '" + getEnumName(getType(0)) + "'!");
return false;
}
return false;
}
// special handling for set, which isn't really an SDNode.
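// For example, in (set GPR:$dst, (add GPR:$a, GPR:$b)) (illustrative), the
// destination operand(s) come first and the value-producing node is last;
// their types are unified below.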
if (getOperator()->getName() == "set") {
assert(getNumTypes() == 0 && "Set doesn't produce a value");
assert(getNumChildren() >= 2 && "Missing RHS of a set?");
unsigned NC = getNumChildren();
TreePatternNode *SetVal = getChild(NC-1);
bool MadeChange = SetVal->ApplyTypeConstraints(TP, NotRegisters);
for (unsigned i = 0; i < NC-1; ++i) {
TreePatternNode *Child = getChild(i);
MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
// Types of operands must match.
MadeChange |= Child->UpdateNodeType(0, SetVal->getExtType(i), TP);
MadeChange |= SetVal->UpdateNodeType(i, Child->getExtType(0), TP);
}
return MadeChange;
}
if (getOperator()->getName() == "implicit") {
assert(getNumTypes() == 0 && "Node doesn't produce a value");
bool MadeChange = false;
for (unsigned i = 0; i < getNumChildren(); ++i)
MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP)) {
bool MadeChange = false;
// Apply the result type to the node.
unsigned NumRetVTs = Int->IS.RetVTs.size();
unsigned NumParamVTs = Int->IS.ParamVTs.size();
for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
if (getNumChildren() != NumParamVTs + 1) {
TP.error("Intrinsic '" + Int->Name + "' expects " +
utostr(NumParamVTs) + " operands, not " +
utostr(getNumChildren() - 1) + " operands!");
return false;
}
// Apply type info to the intrinsic ID.
MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
for (unsigned i = 0, e = getNumChildren()-1; i != e; ++i) {
MadeChange |= getChild(i+1)->ApplyTypeConstraints(TP, NotRegisters);
MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i];
assert(getChild(i+1)->getNumTypes() == 1 && "Unhandled case");
MadeChange |= getChild(i+1)->UpdateNodeType(0, OpVT, TP);
}
return MadeChange;
}
if (getOperator()->isSubClassOf("SDNode")) {
const SDNodeInfo &NI = CDP.getSDNodeInfo(getOperator());
// Check that the number of operands is sane. Negative operands -> varargs.
if (NI.getNumOperands() >= 0 &&
getNumChildren() != (unsigned)NI.getNumOperands()) {
TP.error(getOperator()->getName() + " node requires exactly " +
itostr(NI.getNumOperands()) + " operands!");
return false;
}
bool MadeChange = NI.ApplyTypeConstraints(this, TP);
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
if (getOperator()->isSubClassOf("Instruction")) {
const DAGInstruction &Inst = CDP.getInstruction(getOperator());
CodeGenInstruction &InstInfo =
CDP.getTargetInfo().getInstruction(getOperator());
bool MadeChange = false;
// Apply the result types to the node, these come from the things in the
// (outs) list of the instruction.
unsigned NumResultsToAdd = std::min(InstInfo.Operands.NumDefs,
Inst.getNumResults());
for (unsigned ResNo = 0; ResNo != NumResultsToAdd; ++ResNo)
MadeChange |= UpdateNodeTypeFromInst(ResNo, Inst.getResult(ResNo), TP);
// If the instruction has implicit defs, we apply the first one as a result.
// FIXME: This sucks, it should apply all implicit defs.
if (!InstInfo.ImplicitDefs.empty()) {
unsigned ResNo = NumResultsToAdd;
// FIXME: Generalize to multiple possible types and multiple possible
// ImplicitDefs.
MVT::SimpleValueType VT =
InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo());
if (VT != MVT::Other)
MadeChange |= UpdateNodeType(ResNo, VT, TP);
}
// If this is an INSERT_SUBREG, constrain the source and destination VTs to
// be the same.
if (getOperator()->getName() == "INSERT_SUBREG") {
assert(getChild(0)->getNumTypes() == 1 && "FIXME: Unhandled");
MadeChange |= UpdateNodeType(0, getChild(0)->getExtType(0), TP);
MadeChange |= getChild(0)->UpdateNodeType(0, getExtType(0), TP);
} else if (getOperator()->getName() == "REG_SEQUENCE") {
// We need to do extra, custom typechecking for REG_SEQUENCE since it is
// variadic.
unsigned NChild = getNumChildren();
if (NChild < 3) {
TP.error("REG_SEQUENCE requires at least 3 operands!");
return false;
}
if (NChild % 2 == 0) {
TP.error("REG_SEQUENCE requires an odd number of operands!");
return false;
}
if (!isOperandClass(getChild(0), "RegisterClass")) {
TP.error("REG_SEQUENCE requires a RegisterClass for first operand!");
return false;
}
for (unsigned I = 1; I < NChild; I += 2) {
TreePatternNode *SubIdxChild = getChild(I + 1);
if (!isOperandClass(SubIdxChild, "SubRegIndex")) {
TP.error("REG_SEQUENCE requires a SubRegIndex for operand " +
itostr(I + 1) + "!");
return false;
}
}
}
unsigned ChildNo = 0;
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) {
Record *OperandNode = Inst.getOperand(i);
// If the instruction expects a predicate or optional def operand, we
// codegen this by setting the operand to its default value if it has a
// non-empty DefaultOps field.
if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
!CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
continue;
// Verify that we didn't run out of provided operands.
if (ChildNo >= getNumChildren()) {
emitTooFewOperandsError(TP, getOperator()->getName(), getNumChildren());
return false;
}
TreePatternNode *Child = getChild(ChildNo++);
unsigned ChildResNo = 0; // Instructions always use res #0 of their op.
// If the operand has sub-operands, they may be provided by distinct
// child patterns, so attempt to match each sub-operand separately.
if (OperandNode->isSubClassOf("Operand")) {
DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
if (unsigned NumArgs = MIOpInfo->getNumArgs()) {
// But don't do that if the whole operand is being provided by
// a single ComplexPattern-related Operand.
if (Child->getNumMIResults(CDP) < NumArgs) {
// Match first sub-operand against the child we already have.
Record *SubRec = cast<DefInit>(MIOpInfo->getArg(0))->getDef();
MadeChange |=
Child->UpdateNodeTypeFromInst(ChildResNo, SubRec, TP);
// And the remaining sub-operands against subsequent children.
for (unsigned Arg = 1; Arg < NumArgs; ++Arg) {
if (ChildNo >= getNumChildren()) {
emitTooFewOperandsError(TP, getOperator()->getName(),
getNumChildren());
return false;
}
Child = getChild(ChildNo++);
SubRec = cast<DefInit>(MIOpInfo->getArg(Arg))->getDef();
MadeChange |=
Child->UpdateNodeTypeFromInst(ChildResNo, SubRec, TP);
}
continue;
}
}
}
// If we didn't match by pieces above, attempt to match the whole
// operand now.
MadeChange |= Child->UpdateNodeTypeFromInst(ChildResNo, OperandNode, TP);
}
if (!InstInfo.Operands.isVariadic && ChildNo != getNumChildren()) {
emitTooManyOperandsError(TP, getOperator()->getName(),
ChildNo, getNumChildren());
return false;
}
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
if (getOperator()->isSubClassOf("ComplexPattern")) {
bool MadeChange = false;
for (unsigned i = 0; i < getNumChildren(); ++i)
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!");
// Node transforms always take one operand.
if (getNumChildren() != 1) {
TP.error("Node transform '" + getOperator()->getName() +
"' requires one operand!");
return false;
}
bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
// If either the output or the input of the xform does not have exact type
// info, we assume they must be the same. Otherwise, it is perfectly legal to
// transform from one type to a completely different type.
#if 0
if (!hasTypeSet() || !getChild(0)->hasTypeSet()) {
bool MadeChange = UpdateNodeType(getChild(0)->getExtType(), TP);
MadeChange |= getChild(0)->UpdateNodeType(getExtType(), TP);
return MadeChange;
}
#endif
return MadeChange;
}
/// OnlyOnRHSOfCommutative - Return true if this value is only allowed on the
/// RHS of a commutative operation, not on the LHS.
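/// For example, canPatternMatch uses this to reject a pattern like
/// (add 7, GPR:$a) (illustrative), where an immediate appears on the LHS of a
/// commutative operator.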
static bool OnlyOnRHSOfCommutative(TreePatternNode *N) {
if (!N->isLeaf() && N->getOperator()->getName() == "imm")
return true;
if (N->isLeaf() && isa<IntInit>(N->getLeafValue()))
return true;
return false;
}
/// canPatternMatch - If it is impossible for this pattern to match on this
/// target, fill in Reason and return false. Otherwise, return true. This is
/// used as a sanity check for .td files (to prevent people from writing stuff
/// that can never possibly work), and to prevent the pattern permuter from
/// generating stuff that is useless.
bool TreePatternNode::canPatternMatch(std::string &Reason,
const CodeGenDAGPatterns &CDP) {
if (isLeaf()) return true;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
if (!getChild(i)->canPatternMatch(Reason, CDP))
return false;
// If this is an intrinsic, handle cases that would make it not match. For
// example, if an operand is required to be an immediate.
if (getOperator()->isSubClassOf("Intrinsic")) {
// TODO:
return true;
}
if (getOperator()->isSubClassOf("ComplexPattern"))
return true;
// If this node is a commutative operator, check that the LHS isn't an
// immediate.
const SDNodeInfo &NodeInfo = CDP.getSDNodeInfo(getOperator());
bool isCommIntrinsic = isCommutativeIntrinsic(CDP);
if (NodeInfo.hasProperty(SDNPCommutative) || isCommIntrinsic) {
// Scan all of the operands of the node and make sure that only the last one
// is a constant node, unless the RHS also is.
if (!OnlyOnRHSOfCommutative(getChild(getNumChildren()-1))) {
unsigned Skip = isCommIntrinsic ? 1 : 0; // First operand is intrinsic id.
for (unsigned i = Skip, e = getNumChildren()-1; i != e; ++i)
if (OnlyOnRHSOfCommutative(getChild(i))) {
Reason="Immediate value must be on the RHS of commutative operators!";
return false;
}
}
}
return true;
}
//===----------------------------------------------------------------------===//
// TreePattern implementation
//
TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
isInputPattern(isInput), HasError(false) {
for (Init *I : RawPat->getValues())
Trees.push_back(ParseTreePattern(I, ""));
}
TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
isInputPattern(isInput), HasError(false) {
Trees.push_back(ParseTreePattern(Pat, ""));
}
TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
isInputPattern(isInput), HasError(false) {
Trees.push_back(Pat);
}
void TreePattern::error(const Twine &Msg) {
if (HasError)
return;
dump();
PrintError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
HasError = true;
}
void TreePattern::ComputeNamedNodes() {
for (unsigned i = 0, e = Trees.size(); i != e; ++i)
ComputeNamedNodes(Trees[i]);
}
void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
if (!N->getName().empty())
NamedNodes[N->getName()].push_back(N);
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
ComputeNamedNodes(N->getChild(i));
}
TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
Record *R = DI->getDef();
// Direct reference to a leaf DagNode or PatFrag? Turn it into a
// TreePatternNode of its own. For example:
/// (foo GPR, imm) -> (foo GPR, (imm))
if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrag"))
return ParseTreePattern(
DagInit::get(DI, "",
std::vector<std::pair<Init*, std::string> >()),
OpName);
// Input argument?
TreePatternNode *Res = new TreePatternNode(DI, 1);
if (R->getName() == "node" && !OpName.empty()) {
if (OpName.empty())
error("'node' argument requires a name to match with operand list");
Args.push_back(OpName);
}
Res->setName(OpName);
return Res;
}
// ?:$name or just $name.
if (isa<UnsetInit>(TheInit)) {
if (OpName.empty())
error("'?' argument requires a name to match with operand list");
TreePatternNode *Res = new TreePatternNode(TheInit, 1);
Args.push_back(OpName);
Res->setName(OpName);
return Res;
}
if (IntInit *II = dyn_cast<IntInit>(TheInit)) {
if (!OpName.empty())
error("Constant int argument should not have a name!");
return new TreePatternNode(II, 1);
}
if (BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
// Turn this into an IntInit.
Init *II = BI->convertInitializerTo(IntRecTy::get());
if (!II || !isa<IntInit>(II))
error("Bits value must be constants!");
return ParseTreePattern(II, OpName);
}
DagInit *Dag = dyn_cast<DagInit>(TheInit);
if (!Dag) {
TheInit->dump();
error("Pattern has unexpected init kind!");
}
DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef) error("Pattern has unexpected operator type!");
Record *Operator = OpDef->getDef();
if (Operator->isSubClassOf("ValueType")) {
// If the operator is a ValueType, then this must be "type cast" of a leaf
// node.
if (Dag->getNumArgs() != 1)
error("Type cast only takes one operand!");
TreePatternNode *New = ParseTreePattern(Dag->getArg(0), Dag->getArgName(0));
// Apply the type cast.
assert(New->getNumTypes() == 1 && "FIXME: Unhandled");
New->UpdateNodeType(0, getValueType(Operator), *this);
if (!OpName.empty())
error("ValueType cast should not have a name!");
return New;
}
// Verify that this is something that makes sense for an operator.
if (!Operator->isSubClassOf("PatFrag") &&
!Operator->isSubClassOf("SDNode") &&
!Operator->isSubClassOf("Instruction") &&
!Operator->isSubClassOf("SDNodeXForm") &&
!Operator->isSubClassOf("Intrinsic") &&
!Operator->isSubClassOf("ComplexPattern") &&
Operator->getName() != "set" &&
Operator->getName() != "implicit")
error("Unrecognized node '" + Operator->getName() + "'!");
// Check to see if this is something that is illegal in an input pattern.
if (isInputPattern) {
if (Operator->isSubClassOf("Instruction") ||
Operator->isSubClassOf("SDNodeXForm"))
error("Cannot use '" + Operator->getName() + "' in an input pattern!");
} else {
if (Operator->isSubClassOf("Intrinsic"))
error("Cannot use '" + Operator->getName() + "' in an output pattern!");
if (Operator->isSubClassOf("SDNode") &&
Operator->getName() != "imm" &&
Operator->getName() != "fpimm" &&
Operator->getName() != "tglobaltlsaddr" &&
Operator->getName() != "tconstpool" &&
Operator->getName() != "tjumptable" &&
Operator->getName() != "tframeindex" &&
Operator->getName() != "texternalsym" &&
Operator->getName() != "tblockaddress" &&
Operator->getName() != "tglobaladdr" &&
Operator->getName() != "bb" &&
Operator->getName() != "vt" &&
Operator->getName() != "mcsym")
error("Cannot use '" + Operator->getName() + "' in an output pattern!");
}
std::vector<TreePatternNode*> Children;
// Parse all the operands.
for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i)
Children.push_back(ParseTreePattern(Dag->getArg(i), Dag->getArgName(i)));
// If the operator is an intrinsic, then this is just syntactic sugar for
// (intrinsic_* <number>, ..children..). Pick the right intrinsic node, and
// convert the intrinsic name to a number.
if (Operator->isSubClassOf("Intrinsic")) {
const CodeGenIntrinsic &Int = getDAGPatterns().getIntrinsic(Operator);
unsigned IID = getDAGPatterns().getIntrinsicID(Operator)+1;
// If this intrinsic returns void, it must have side-effects and thus a
// chain.
if (Int.IS.RetVTs.empty())
Operator = getDAGPatterns().get_intrinsic_void_sdnode();
else if (Int.ModRef != CodeGenIntrinsic::NoMem)
// Has side-effects, requires chain.
Operator = getDAGPatterns().get_intrinsic_w_chain_sdnode();
else // Otherwise, no chain.
Operator = getDAGPatterns().get_intrinsic_wo_chain_sdnode();
TreePatternNode *IIDNode = new TreePatternNode(IntInit::get(IID), 1);
Children.insert(Children.begin(), IIDNode);
}
if (Operator->isSubClassOf("ComplexPattern")) {
for (unsigned i = 0; i < Children.size(); ++i) {
TreePatternNode *Child = Children[i];
if (Child->getName().empty())
error("All arguments to a ComplexPattern must be named");
// Check that the ComplexPattern uses are consistent: "(MY_PAT $a, $b)"
// and "(MY_PAT $b, $a)" should not be allowed in the same pattern;
// neither should "(MY_PAT_1 $a, $b)" and "(MY_PAT_2 $a, $b)".
auto OperandId = std::make_pair(Operator, i);
auto PrevOp = ComplexPatternOperands.find(Child->getName());
if (PrevOp != ComplexPatternOperands.end()) {
if (PrevOp->getValue() != OperandId)
error("All ComplexPattern operands must appear consistently: "
"in the same order in just one ComplexPattern instance.");
} else
ComplexPatternOperands[Child->getName()] = OperandId;
}
}
unsigned NumResults = GetNumNodeResults(Operator, CDP);
TreePatternNode *Result = new TreePatternNode(Operator, Children, NumResults);
Result->setName(OpName);
if (!Dag->getName().empty()) {
assert(Result->getName().empty());
Result->setName(Dag->getName());
}
return Result;
}
/// SimplifyTree - See if we can simplify this tree to eliminate something that
/// will never match in favor of something obvious that will. This is here
/// strictly as a convenience to target authors because it allows them to write
/// more type generic things and have useless type casts fold away.
///
/// This returns true if any change is made.
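/// For example, (v2i32 (bitconvert (v2i32 GPR:$src))) (illustrative) collapses
/// to plain (v2i32 GPR:$src) once both types are resolved.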
static bool SimplifyTree(TreePatternNode *&N) {
if (N->isLeaf())
return false;
// If we have a bitconvert with a resolved type and if the source and
// destination types are the same, then the bitconvert is useless, remove it.
if (N->getOperator()->getName() == "bitconvert" &&
N->getExtType(0).isConcrete() &&
N->getExtType(0) == N->getChild(0)->getExtType(0) &&
N->getName().empty()) {
N = N->getChild(0);
SimplifyTree(N);
return true;
}
// Walk all children.
bool MadeChange = false;
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = N->getChild(i);
MadeChange |= SimplifyTree(Child);
N->setChild(i, Child);
}
return MadeChange;
}
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
/// otherwise. Flags an error if a type contradiction is found.
bool TreePattern::
InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
if (NamedNodes.empty())
ComputeNamedNodes();
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
for (unsigned i = 0, e = Trees.size(); i != e; ++i) {
MadeChange |= Trees[i]->ApplyTypeConstraints(*this, false);
MadeChange |= SimplifyTree(Trees[i]);
}
// If there are constraints on our named nodes, apply them.
for (StringMap<SmallVector<TreePatternNode*,1> >::iterator
I = NamedNodes.begin(), E = NamedNodes.end(); I != E; ++I) {
SmallVectorImpl<TreePatternNode*> &Nodes = I->second;
// If we have input named node types, propagate their types to the named
// values here.
if (InNamedTypes) {
if (!InNamedTypes->count(I->getKey())) {
error("Node '" + std::string(I->getKey()) +
"' in output pattern but not input pattern");
return true;
}
const SmallVectorImpl<TreePatternNode*> &InNodes =
InNamedTypes->find(I->getKey())->second;
// The input types should be fully resolved by now.
for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
// If this node is a register class, and it is the root of the pattern
// then we're mapping something onto an input register. We allow
// changing the type of the input register in this case. This allows
// us to match things like:
// def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
if (Nodes[i] == Trees[0] && Nodes[i]->isLeaf()) {
DefInit *DI = dyn_cast<DefInit>(Nodes[i]->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
continue;
}
assert(Nodes[i]->getNumTypes() == 1 &&
InNodes[0]->getNumTypes() == 1 &&
"FIXME: cannot name multiple result nodes yet");
MadeChange |= Nodes[i]->UpdateNodeType(0, InNodes[0]->getExtType(0),
*this);
}
}
// If there are multiple nodes with the same name, they must all have the
// same type.
if (I->second.size() > 1) {
for (unsigned i = 0, e = Nodes.size()-1; i != e; ++i) {
TreePatternNode *N1 = Nodes[i], *N2 = Nodes[i+1];
assert(N1->getNumTypes() == 1 && N2->getNumTypes() == 1 &&
"FIXME: cannot name multiple result nodes yet");
MadeChange |= N1->UpdateNodeType(0, N2->getExtType(0), *this);
MadeChange |= N2->UpdateNodeType(0, N1->getExtType(0), *this);
}
}
}
}
bool HasUnresolvedTypes = false;
for (unsigned i = 0, e = Trees.size(); i != e; ++i)
HasUnresolvedTypes |= Trees[i]->ContainsUnresolvedType();
return !HasUnresolvedTypes;
}
void TreePattern::print(raw_ostream &OS) const {
OS << getRecord()->getName();
if (!Args.empty()) {
OS << "(" << Args[0];
for (unsigned i = 1, e = Args.size(); i != e; ++i)
OS << ", " << Args[i];
OS << ")";
}
OS << ": ";
if (Trees.size() > 1)
OS << "[\n";
for (unsigned i = 0, e = Trees.size(); i != e; ++i) {
OS << "\t";
Trees[i]->print(OS);
OS << "\n";
}
if (Trees.size() > 1)
OS << "]\n";
}
void TreePattern::dump() const { print(errs()); }
//===----------------------------------------------------------------------===//
// CodeGenDAGPatterns implementation
//
CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) :
Records(R), Target(R) {
Intrinsics = LoadIntrinsics(Records, false);
TgtIntrinsics = LoadIntrinsics(Records, true);
ParseNodeInfo();
ParseNodeTransforms();
ParseComplexPatterns();
ParsePatternFragments();
ParseDefaultOperands();
ParseInstructions();
ParsePatternFragments(/*OutFrags*/true);
ParsePatterns();
// Generate variants. For example, commutative patterns can match
// multiple ways. Add them to PatternsToMatch as well.
GenerateVariants();
// Infer instruction flags. For example, we can detect loads,
// stores, and side effects in many cases by examining an
// instruction's pattern.
InferInstructionFlags();
// Verify that instruction flags match the patterns.
VerifyInstructionFlags();
}
Record *CodeGenDAGPatterns::getSDNodeNamed(const std::string &Name) const {
Record *N = Records.getDef(Name);
if (!N || !N->isSubClassOf("SDNode"))
PrintFatalError("Error getting SDNode '" + Name + "'!");
return N;
}
// Parse all of the SDNode definitions for the target, populating SDNodes.
void CodeGenDAGPatterns::ParseNodeInfo() {
std::vector<Record*> Nodes = Records.getAllDerivedDefinitions("SDNode");
while (!Nodes.empty()) {
SDNodes.insert(std::make_pair(Nodes.back(), Nodes.back()));
Nodes.pop_back();
}
// Get the builtin intrinsic nodes.
intrinsic_void_sdnode = getSDNodeNamed("intrinsic_void");
intrinsic_w_chain_sdnode = getSDNodeNamed("intrinsic_w_chain");
intrinsic_wo_chain_sdnode = getSDNodeNamed("intrinsic_wo_chain");
}
/// ParseNodeTransforms - Parse all SDNodeXForm instances into the SDNodeXForms
/// map, and emit them to the file as functions.
void CodeGenDAGPatterns::ParseNodeTransforms() {
std::vector<Record*> Xforms = Records.getAllDerivedDefinitions("SDNodeXForm");
while (!Xforms.empty()) {
Record *XFormNode = Xforms.back();
Record *SDNode = XFormNode->getValueAsDef("Opcode");
std::string Code = XFormNode->getValueAsString("XFormFunction");
SDNodeXForms.insert(std::make_pair(XFormNode, NodeXForm(SDNode, Code)));
Xforms.pop_back();
}
}
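/// ParseComplexPatterns - Collect all ComplexPattern definitions into the
/// ComplexPatterns map.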
void CodeGenDAGPatterns::ParseComplexPatterns() {
std::vector<Record*> AMs = Records.getAllDerivedDefinitions("ComplexPattern");
while (!AMs.empty()) {
ComplexPatterns.insert(std::make_pair(AMs.back(), AMs.back()));
AMs.pop_back();
}
}
/// ParsePatternFragments - Parse all of the PatFrag definitions in the .td
/// file, building up the PatternFragments map. After we've collected them all,
/// inline fragments together as necessary, so that there are no references left
/// inside a pattern fragment to a pattern fragment.
///
void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
std::vector<Record*> Fragments = Records.getAllDerivedDefinitions("PatFrag");
// First step, parse all of the fragments.
for (unsigned i = 0, e = Fragments.size(); i != e; ++i) {
if (OutFrags != Fragments[i]->isSubClassOf("OutPatFrag"))
continue;
DagInit *Tree = Fragments[i]->getValueAsDag("Fragment");
TreePattern *P =
(PatternFragments[Fragments[i]] = llvm::make_unique<TreePattern>(
Fragments[i], Tree, !Fragments[i]->isSubClassOf("OutPatFrag"),
*this)).get();
    // Validate the argument list, converting it to a set to discard duplicates.
std::vector<std::string> &Args = P->getArgList();
std::set<std::string> OperandsSet(Args.begin(), Args.end());
if (OperandsSet.count(""))
P->error("Cannot have unnamed 'node' values in pattern fragment!");
// Parse the operands list.
DagInit *OpsList = Fragments[i]->getValueAsDag("Operands");
DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
// Special cases: ops == outs == ins. Different names are used to
// improve readability.
if (!OpsOp ||
(OpsOp->getDef()->getName() != "ops" &&
OpsOp->getDef()->getName() != "outs" &&
OpsOp->getDef()->getName() != "ins"))
P->error("Operands list should start with '(ops ... '!");
// Copy over the arguments.
Args.clear();
for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) {
if (!isa<DefInit>(OpsList->getArg(j)) ||
cast<DefInit>(OpsList->getArg(j))->getDef()->getName() != "node")
P->error("Operands list should all be 'node' values.");
if (OpsList->getArgName(j).empty())
P->error("Operands list should have names for each operand!");
if (!OperandsSet.count(OpsList->getArgName(j)))
P->error("'" + OpsList->getArgName(j) +
"' does not occur in pattern or was multiply specified!");
OperandsSet.erase(OpsList->getArgName(j));
Args.push_back(OpsList->getArgName(j));
}
if (!OperandsSet.empty())
P->error("Operands list does not contain an entry for operand '" +
*OperandsSet.begin() + "'!");
// If there is a code init for this fragment, keep track of the fact that
// this fragment uses it.
TreePredicateFn PredFn(P);
if (!PredFn.isAlwaysTrue())
P->getOnlyTree()->addPredicateFn(PredFn);
// If there is a node transformation corresponding to this, keep track of
// it.
Record *Transform = Fragments[i]->getValueAsDef("OperandTransform");
if (!getSDNodeTransform(Transform).second.empty()) // not noop xform?
P->getOnlyTree()->setTransformFn(Transform);
}
// Now that we've parsed all of the tree fragments, do a closure on them so
// that there are not references to PatFrags left inside of them.
for (unsigned i = 0, e = Fragments.size(); i != e; ++i) {
if (OutFrags != Fragments[i]->isSubClassOf("OutPatFrag"))
continue;
TreePattern &ThePat = *PatternFragments[Fragments[i]];
ThePat.InlinePatternFragments();
    // Infer as many types as possible. Don't worry about it if we don't infer
    // all of them; some may depend on the inputs of the pattern.
ThePat.InferAllTypes();
ThePat.resetError();
// If debugging, print out the pattern fragment result.
DEBUG(ThePat.dump());
}
}
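/// ParseDefaultOperands - Parse all OperandWithDefaultOps records, resolving
/// the types of each default operand list and recording the results in the
/// DefaultOperands map.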
void CodeGenDAGPatterns::ParseDefaultOperands() {
std::vector<Record*> DefaultOps;
DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps");
// Find some SDNode.
assert(!SDNodes.empty() && "No SDNodes parsed?");
Init *SomeSDNode = DefInit::get(SDNodes.begin()->first);
for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) {
DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
// Clone the DefaultInfo dag node, changing the operator from 'ops' to
    // SomeSDNode so that we can parse this.
std::vector<std::pair<Init*, std::string> > Ops;
for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
DefaultInfo->getArgName(op)));
DagInit *DI = DagInit::get(SomeSDNode, "", Ops);
// Create a TreePattern to parse this.
TreePattern P(DefaultOps[i], DI, false, *this);
assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
// Copy the operands over into a DAGDefaultOperand.
DAGDefaultOperand DefaultOpInfo;
TreePatternNode *T = P.getTree(0);
for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
TreePatternNode *TPN = T->getChild(op);
while (TPN->ApplyTypeConstraints(P, false))
/* Resolve all types */;
if (TPN->ContainsUnresolvedType()) {
PrintFatalError("Value #" + Twine(i) + " of OperandWithDefaultOps '" +
DefaultOps[i]->getName() +
"' doesn't have a concrete type!");
}
DefaultOpInfo.DefaultOps.push_back(TPN);
}
// Insert it into the DefaultOperands map so we can find it later.
DefaultOperands[DefaultOps[i]] = DefaultOpInfo;
}
}
/// HandleUse - Given "Pat" a leaf in the pattern, check to see if it is an
/// instruction input. Return true if this is a real use.
static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
std::map<std::string, TreePatternNode*> &InstInputs) {
// No name -> not interesting.
if (Pat->getName().empty()) {
if (Pat->isLeaf()) {
DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
I->error("Input " + DI->getDef()->getName() + " must be named!");
}
return false;
}
Record *Rec;
if (Pat->isLeaf()) {
DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (!DI) I->error("Input $" + Pat->getName() + " must be an identifier!");
Rec = DI->getDef();
} else {
Rec = Pat->getOperator();
}
// SRCVALUE nodes are ignored.
if (Rec->getName() == "srcvalue")
return false;
TreePatternNode *&Slot = InstInputs[Pat->getName()];
if (!Slot) {
Slot = Pat;
return true;
}
Record *SlotRec;
if (Slot->isLeaf()) {
SlotRec = cast<DefInit>(Slot->getLeafValue())->getDef();
} else {
assert(Slot->getNumChildren() == 0 && "can't be a use with children!");
SlotRec = Slot->getOperator();
}
// Ensure that the inputs agree if we've already seen this input.
if (Rec != SlotRec)
I->error("All $" + Pat->getName() + " inputs must agree with each other");
if (Slot->getExtTypes() != Pat->getExtTypes())
I->error("All $" + Pat->getName() + " inputs must agree with each other");
return true;
}
/// FindPatternInputsAndOutputs - Scan the specified TreePatternNode (which is
/// part of "I", the instruction), computing the set of inputs and outputs of
/// the pattern. Report errors if we see anything naughty.
void CodeGenDAGPatterns::
FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
std::map<std::string, TreePatternNode*> &InstInputs,
std::map<std::string, TreePatternNode*>&InstResults,
std::vector<Record*> &InstImpResults) {
if (Pat->isLeaf()) {
bool isUse = HandleUse(I, Pat, InstInputs);
if (!isUse && Pat->getTransformFn())
I->error("Cannot specify a transform function for a non-input value!");
return;
}
if (Pat->getOperator()->getName() == "implicit") {
for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
TreePatternNode *Dest = Pat->getChild(i);
if (!Dest->isLeaf())
I->error("implicitly defined value should be a register!");
DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
if (!Val || !Val->getDef()->isSubClassOf("Register"))
I->error("implicitly defined value should be a register!");
InstImpResults.push_back(Val->getDef());
}
return;
}
if (Pat->getOperator()->getName() != "set") {
// If this is not a set, verify that the children nodes are not void typed,
// and recurse.
for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
if (Pat->getChild(i)->getNumTypes() == 0)
I->error("Cannot have void nodes inside of patterns!");
FindPatternInputsAndOutputs(I, Pat->getChild(i), InstInputs, InstResults,
InstImpResults);
}
// If this is a non-leaf node with no children, treat it basically as if
// it were a leaf. This handles nodes like (imm).
bool isUse = HandleUse(I, Pat, InstInputs);
if (!isUse && Pat->getTransformFn())
I->error("Cannot specify a transform function for a non-input value!");
return;
}
// Otherwise, this is a set, validate and collect instruction results.
if (Pat->getNumChildren() == 0)
I->error("set requires operands!");
if (Pat->getTransformFn())
I->error("Cannot specify a transform function on a set node!");
// Check the set destinations.
unsigned NumDests = Pat->getNumChildren()-1;
for (unsigned i = 0; i != NumDests; ++i) {
TreePatternNode *Dest = Pat->getChild(i);
if (!Dest->isLeaf())
I->error("set destination should be a register!");
DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
if (!Val) {
I->error("set destination should be a register!");
continue;
}
if (Val->getDef()->isSubClassOf("RegisterClass") ||
Val->getDef()->isSubClassOf("ValueType") ||
Val->getDef()->isSubClassOf("RegisterOperand") ||
Val->getDef()->isSubClassOf("PointerLikeRegClass")) {
if (Dest->getName().empty())
I->error("set destination must have a name!");
if (InstResults.count(Dest->getName()))
I->error("cannot set '" + Dest->getName() +"' multiple times");
InstResults[Dest->getName()] = Dest;
} else if (Val->getDef()->isSubClassOf("Register")) {
InstImpResults.push_back(Val->getDef());
} else {
I->error("set destination should be a register!");
}
}
// Verify and collect info from the computation.
FindPatternInputsAndOutputs(I, Pat->getChild(NumDests),
InstInputs, InstResults, InstImpResults);
}
//===----------------------------------------------------------------------===//
// Instruction Analysis
//===----------------------------------------------------------------------===//
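/// InstAnalyzer - Walk a pattern tree and accumulate the instruction
/// properties (mayStore, mayLoad, hasSideEffects, isBitcast, isVariadic)
/// implied by the SDNodes, ComplexPatterns, and intrinsics it contains.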
class InstAnalyzer {
const CodeGenDAGPatterns &CDP;
public:
bool hasSideEffects;
bool mayStore;
bool mayLoad;
bool isBitcast;
bool isVariadic;
InstAnalyzer(const CodeGenDAGPatterns &cdp)
: CDP(cdp), hasSideEffects(false), mayStore(false), mayLoad(false),
isBitcast(false), isVariadic(false) {}
void Analyze(const TreePattern *Pat) {
// Assume only the first tree is the pattern. The others are clobber nodes.
AnalyzeNode(Pat->getTree(0));
}
void Analyze(const PatternToMatch *Pat) {
AnalyzeNode(Pat->getSrcPattern());
}
private:
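  // IsNodeBitcast - Return true if this 'set' node simply moves a value
  // through an ISD::BITCAST with a single operand and result, and no other
  // properties have been observed so far.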
bool IsNodeBitcast(const TreePatternNode *N) const {
if (hasSideEffects || mayLoad || mayStore || isVariadic)
return false;
if (N->getNumChildren() != 2)
return false;
const TreePatternNode *N0 = N->getChild(0);
if (!N0->isLeaf() || !isa<DefInit>(N0->getLeafValue()))
return false;
const TreePatternNode *N1 = N->getChild(1);
if (N1->isLeaf())
return false;
if (N1->getNumChildren() != 1 || !N1->getChild(0)->isLeaf())
return false;
const SDNodeInfo &OpInfo = CDP.getSDNodeInfo(N1->getOperator());
if (OpInfo.getNumResults() != 1 || OpInfo.getNumOperands() != 1)
return false;
return OpInfo.getEnumName() == "ISD::BITCAST";
}
public:
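  // AnalyzeNode - Recursively scan a pattern node, ORing in any properties
  // implied by ComplexPattern leaves, SDNode properties, or intrinsic
  // mod/ref behavior.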
void AnalyzeNode(const TreePatternNode *N) {
if (N->isLeaf()) {
if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
Record *LeafRec = DI->getDef();
// Handle ComplexPattern leaves.
if (LeafRec->isSubClassOf("ComplexPattern")) {
const ComplexPattern &CP = CDP.getComplexPattern(LeafRec);
if (CP.hasProperty(SDNPMayStore)) mayStore = true;
if (CP.hasProperty(SDNPMayLoad)) mayLoad = true;
if (CP.hasProperty(SDNPSideEffect)) hasSideEffects = true;
}
}
return;
}
// Analyze children.
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
AnalyzeNode(N->getChild(i));
// Ignore set nodes, which are not SDNodes.
if (N->getOperator()->getName() == "set") {
isBitcast = IsNodeBitcast(N);
return;
}
// Notice properties of the node.
if (N->NodeHasProperty(SDNPMayStore, CDP)) mayStore = true;
if (N->NodeHasProperty(SDNPMayLoad, CDP)) mayLoad = true;
if (N->NodeHasProperty(SDNPSideEffect, CDP)) hasSideEffects = true;
if (N->NodeHasProperty(SDNPVariadic, CDP)) isVariadic = true;
if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) {
// If this is an intrinsic, analyze it.
      if (IntInfo->ModRef >= CodeGenIntrinsic::ReadArgMem)
        mayLoad = true;  // These may load memory.
      if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteArgMem)
        mayStore = true; // Intrinsics that can write to memory are 'mayStore'.
if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem)
// WriteMem intrinsics can have other strange effects.
hasSideEffects = true;
}
}
};
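/// InferFromPattern - Check the flags inferred from the pattern against any
/// flags explicitly set on the instruction, reporting conflicts, and then
/// transfer the inferred flags onto InstInfo. Returns true if an error was
/// reported.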
static bool InferFromPattern(CodeGenInstruction &InstInfo,
const InstAnalyzer &PatInfo,
Record *PatDef) {
bool Error = false;
// Remember where InstInfo got its flags.
if (InstInfo.hasUndefFlags())
InstInfo.InferredFrom = PatDef;
// Check explicitly set flags for consistency.
if (InstInfo.hasSideEffects != PatInfo.hasSideEffects &&
!InstInfo.hasSideEffects_Unset) {
// Allow explicitly setting hasSideEffects = 1 on instructions, even when
// the pattern has no side effects. That could be useful for div/rem
// instructions that may trap.
if (!InstInfo.hasSideEffects) {
Error = true;
PrintError(PatDef->getLoc(), "Pattern doesn't match hasSideEffects = " +
Twine(InstInfo.hasSideEffects));
}
}
if (InstInfo.mayStore != PatInfo.mayStore && !InstInfo.mayStore_Unset) {
Error = true;
PrintError(PatDef->getLoc(), "Pattern doesn't match mayStore = " +
Twine(InstInfo.mayStore));
}
if (InstInfo.mayLoad != PatInfo.mayLoad && !InstInfo.mayLoad_Unset) {
// Allow explicitly setting mayLoad = 1, even when the pattern has no loads.
    // Some targets translate immediates to loads.
if (!InstInfo.mayLoad) {
Error = true;
PrintError(PatDef->getLoc(), "Pattern doesn't match mayLoad = " +
Twine(InstInfo.mayLoad));
}
}
// Transfer inferred flags.
InstInfo.hasSideEffects |= PatInfo.hasSideEffects;
InstInfo.mayStore |= PatInfo.mayStore;
InstInfo.mayLoad |= PatInfo.mayLoad;
// These flags are silently added without any verification.
InstInfo.isBitcast |= PatInfo.isBitcast;
// Don't infer isVariadic. This flag means something different on SDNodes and
// instructions. For example, a CALL SDNode is variadic because it has the
// call arguments as operands, but a CALL instruction is not variadic - it
// has argument registers as implicit, not explicit uses.
return Error;
}
/// hasNullFragReference - Return true if the DAG has any reference to the
/// null_frag operator.
static bool hasNullFragReference(DagInit *DI) {
DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
if (!OpDef) return false;
Record *Operator = OpDef->getDef();
// If this is the null fragment, return true.
if (Operator->getName() == "null_frag") return true;
// If any of the arguments reference the null fragment, return true.
for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
DagInit *Arg = dyn_cast<DagInit>(DI->getArg(i));
if (Arg && hasNullFragReference(Arg))
return true;
}
return false;
}
/// hasNullFragReference - Return true if any DAG in the list references
/// the null_frag operator.
static bool hasNullFragReference(ListInit *LI) {
for (Init *I : LI->getValues()) {
DagInit *DI = dyn_cast<DagInit>(I);
assert(DI && "non-dag in an instruction Pattern list?!");
if (hasNullFragReference(DI))
return true;
}
return false;
}
/// Get all the instructions in a tree.
static void
getInstructionsInTree(TreePatternNode *Tree, SmallVectorImpl<Record*> &Instrs) {
if (Tree->isLeaf())
return;
if (Tree->getOperator()->isSubClassOf("Instruction"))
Instrs.push_back(Tree->getOperator());
for (unsigned i = 0, e = Tree->getNumChildren(); i != e; ++i)
getInstructionsInTree(Tree->getChild(i), Instrs);
}
/// Check the class of a pattern leaf node against the instruction operand it
/// represents.
static bool checkOperandClass(CGIOperandList::OperandInfo &OI,
Record *Leaf) {
if (OI.Rec == Leaf)
return true;
// Allow direct value types to be used in instruction set patterns.
// The type will be checked later.
if (Leaf->isSubClassOf("ValueType"))
return true;
// Patterns can also be ComplexPattern instances.
if (Leaf->isSubClassOf("ComplexPattern"))
return true;
return false;
}
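/// parseInstructionPattern - Parse a single instruction's Pattern list,
/// checking the named inputs and outputs against the instruction's operand
/// list and recording the result as a DAGInstruction in DAGInsts.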
const DAGInstruction &CodeGenDAGPatterns::parseInstructionPattern(
CodeGenInstruction &CGI, ListInit *Pat, DAGInstMap &DAGInsts) {
assert(!DAGInsts.count(CGI.TheDef) && "Instruction already parsed!");
// Parse the instruction.
TreePattern *I = new TreePattern(CGI.TheDef, Pat, true, *this);
// Inline pattern fragments into it.
I->InlinePatternFragments();
// Infer as many types as possible. If we cannot infer all of them, we can
// never do anything with this instruction pattern: report it to the user.
if (!I->InferAllTypes())
I->error("Could not infer all types in pattern!");
// InstInputs - Keep track of all of the inputs of the instruction, along
// with the record they are declared as.
std::map<std::string, TreePatternNode*> InstInputs;
// InstResults - Keep track of all the virtual registers that are 'set'
// in the instruction, including what reg class they are.
std::map<std::string, TreePatternNode*> InstResults;
std::vector<Record*> InstImpResults;
// Verify that the top-level forms in the instruction are of void type, and
// fill in the InstResults map.
for (unsigned j = 0, e = I->getNumTrees(); j != e; ++j) {
TreePatternNode *Pat = I->getTree(j);
if (Pat->getNumTypes() != 0)
I->error("Top-level forms in instruction pattern should have"
" void types");
// Find inputs and outputs, and verify the structure of the uses/defs.
FindPatternInputsAndOutputs(I, Pat, InstInputs, InstResults,
InstImpResults);
}
// Now that we have inputs and outputs of the pattern, inspect the operands
// list for the instruction. This determines the order that operands are
// added to the machine instruction the node corresponds to.
unsigned NumResults = InstResults.size();
// Parse the operands list from the (ops) list, validating it.
assert(I->getArgList().empty() && "Args list should still be empty here!");
// Check that all of the results occur first in the list.
std::vector<Record*> Results;
SmallVector<TreePatternNode *, 2> ResNodes;
for (unsigned i = 0; i != NumResults; ++i) {
if (i == CGI.Operands.size())
I->error("'" + InstResults.begin()->first +
"' set but does not appear in operand list!");
const std::string &OpName = CGI.Operands[i].Name;
// Check that it exists in InstResults.
TreePatternNode *RNode = InstResults[OpName];
if (!RNode)
I->error("Operand $" + OpName + " does not exist in operand list!");
ResNodes.push_back(RNode);
Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
if (!R)
I->error("Operand $" + OpName + " should be a set destination: all "
"outputs must occur before inputs in operand list!");
if (!checkOperandClass(CGI.Operands[i], R))
I->error("Operand $" + OpName + " class mismatch!");
// Remember the return type.
Results.push_back(CGI.Operands[i].Rec);
// Okay, this one checks out.
InstResults.erase(OpName);
}
// Loop over the inputs next. Make a copy of InstInputs so we can destroy
// the copy while we're checking the inputs.
std::map<std::string, TreePatternNode*> InstInputsCheck(InstInputs);
std::vector<TreePatternNode*> ResultNodeOperands;
std::vector<Record*> Operands;
for (unsigned i = NumResults, e = CGI.Operands.size(); i != e; ++i) {
CGIOperandList::OperandInfo &Op = CGI.Operands[i];
const std::string &OpName = Op.Name;
if (OpName.empty())
I->error("Operand #" + utostr(i) + " in operands list has no name!");
if (!InstInputsCheck.count(OpName)) {
// If this is an operand with a DefaultOps set filled in, we can ignore
// this. When we codegen it, we will do so as always executed.
if (Op.Rec->isSubClassOf("OperandWithDefaultOps")) {
// Does it have a non-empty DefaultOps field? If so, ignore this
// operand.
if (!getDefaultOperand(Op.Rec).DefaultOps.empty())
continue;
}
I->error("Operand $" + OpName +
" does not appear in the instruction pattern");
}
TreePatternNode *InVal = InstInputsCheck[OpName];
InstInputsCheck.erase(OpName); // It occurred, remove from map.
if (InVal->isLeaf() && isa<DefInit>(InVal->getLeafValue())) {
Record *InRec = static_cast<DefInit*>(InVal->getLeafValue())->getDef();
if (!checkOperandClass(Op, InRec))
I->error("Operand $" + OpName + "'s register class disagrees"
" between the operand and pattern");
}
Operands.push_back(Op.Rec);
// Construct the result for the dest-pattern operand list.
TreePatternNode *OpNode = InVal->clone();
// No predicate is useful on the result.
OpNode->clearPredicateFns();
// Promote the xform function to be an explicit node if set.
if (Record *Xform = OpNode->getTransformFn()) {
OpNode->setTransformFn(nullptr);
std::vector<TreePatternNode*> Children;
Children.push_back(OpNode);
OpNode = new TreePatternNode(Xform, Children, OpNode->getNumTypes());
}
ResultNodeOperands.push_back(OpNode);
}
if (!InstInputsCheck.empty())
I->error("Input operand $" + InstInputsCheck.begin()->first +
" occurs in pattern but not in operands list!");
TreePatternNode *ResultPattern =
new TreePatternNode(I->getRecord(), ResultNodeOperands,
GetNumNodeResults(I->getRecord(), *this));
// Copy fully inferred output node types to instruction result pattern.
for (unsigned i = 0; i != NumResults; ++i) {
assert(ResNodes[i]->getNumTypes() == 1 && "FIXME: Unhandled");
ResultPattern->setType(i, ResNodes[i]->getExtType(0));
}
// Create and insert the instruction.
// FIXME: InstImpResults should not be part of DAGInstruction.
DAGInstruction TheInst(I, Results, Operands, InstImpResults);
DAGInsts.insert(std::make_pair(I->getRecord(), TheInst));
// Use a temporary tree pattern to infer all types and make sure that the
// constructed result is correct. This depends on the instruction already
// being inserted into the DAGInsts map.
TreePattern Temp(I->getRecord(), ResultPattern, false, *this);
Temp.InferAllTypes(&I->getNamedNodesMap());
DAGInstruction &TheInsertedInst = DAGInsts.find(I->getRecord())->second;
TheInsertedInst.setResultPattern(Temp.getOnlyTree());
return TheInsertedInst;
}
/// ParseInstructions - Parse all of the instructions, inlining and resolving
/// any fragments involved. This populates the Instructions list with fully
/// resolved instructions.
void CodeGenDAGPatterns::ParseInstructions() {
std::vector<Record*> Instrs = Records.getAllDerivedDefinitions("Instruction");
for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
ListInit *LI = nullptr;
if (isa<ListInit>(Instrs[i]->getValueInit("Pattern")))
LI = Instrs[i]->getValueAsListInit("Pattern");
// If there is no pattern, only collect minimal information about the
// instruction for its operand list. We have to assume that there is one
    // result, as we have no detailed info. A pattern which references the
    // null_frag operator is treated as if no pattern were specified. Normally
    // this comes from a multiclass expansion with an SDPatternOperator passed
    // in as null_frag.
if (!LI || LI->empty() || hasNullFragReference(LI)) {
std::vector<Record*> Results;
std::vector<Record*> Operands;
CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
if (InstInfo.Operands.size() != 0) {
for (unsigned j = 0, e = InstInfo.Operands.NumDefs; j < e; ++j)
Results.push_back(InstInfo.Operands[j].Rec);
// The rest are inputs.
for (unsigned j = InstInfo.Operands.NumDefs,
e = InstInfo.Operands.size(); j < e; ++j)
Operands.push_back(InstInfo.Operands[j].Rec);
}
// Create and insert the instruction.
std::vector<Record*> ImpResults;
Instructions.insert(std::make_pair(Instrs[i],
DAGInstruction(nullptr, Results, Operands, ImpResults)));
continue; // no pattern.
}
CodeGenInstruction &CGI = Target.getInstruction(Instrs[i]);
const DAGInstruction &DI = parseInstructionPattern(CGI, LI, Instructions);
(void)DI;
DEBUG(DI.getPattern()->dump());
}
// If we can, convert the instructions to be patterns that are matched!
for (std::map<Record*, DAGInstruction, LessRecordByID>::iterator II =
Instructions.begin(),
E = Instructions.end(); II != E; ++II) {
DAGInstruction &TheInst = II->second;
TreePattern *I = TheInst.getPattern();
if (!I) continue; // No pattern.
// FIXME: Assume only the first tree is the pattern. The others are clobber
// nodes.
TreePatternNode *Pattern = I->getTree(0);
TreePatternNode *SrcPattern;
if (Pattern->getOperator()->getName() == "set") {
SrcPattern = Pattern->getChild(Pattern->getNumChildren()-1)->clone();
    } else {
// Not a set (store or something?)
SrcPattern = Pattern;
}
Record *Instr = II->first;
AddPatternToMatch(I,
PatternToMatch(Instr,
Instr->getValueAsListInit("Predicates"),
SrcPattern,
TheInst.getResultPattern(),
TheInst.getImpResults(),
Instr->getValueAsInt("AddedComplexity"),
Instr->getID()));
}
}
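// NameRecord - For each named value, remember the first node seen with that
// name and the total number of uses.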
typedef std::pair<const TreePatternNode*, unsigned> NameRecord;
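/// FindNames - Recursively collect every named node in the pattern into
/// Names, counting uses and reporting an error if two uses of the same name
/// have different types.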
static void FindNames(const TreePatternNode *P,
std::map<std::string, NameRecord> &Names,
TreePattern *PatternTop) {
if (!P->getName().empty()) {
NameRecord &Rec = Names[P->getName()];
// If this is the first instance of the name, remember the node.
if (Rec.second++ == 0)
Rec.first = P;
else if (Rec.first->getExtTypes() != P->getExtTypes())
PatternTop->error("repetition of value: $" + P->getName() +
" where different uses have different types!");
}
if (!P->isLeaf()) {
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
FindNames(P->getChild(i), Names, PatternTop);
}
}
void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
const PatternToMatch &PTM) {
// Do some sanity checking on the pattern we're about to match.
std::string Reason;
if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) {
PrintWarning(Pattern->getRecord()->getLoc(),
Twine("Pattern can never match: ") + Reason);
return;
}
// If the source pattern's root is a complex pattern, that complex pattern
// must specify the nodes it can potentially match.
if (const ComplexPattern *CP =
PTM.getSrcPattern()->getComplexPatternInfo(*this))
if (CP->getRootNodes().empty())
Pattern->error("ComplexPattern at root must specify list of opcodes it"
" could match");
// Find all of the named values in the input and output, ensure they have the
// same type.
std::map<std::string, NameRecord> SrcNames, DstNames;
FindNames(PTM.getSrcPattern(), SrcNames, Pattern);
FindNames(PTM.getDstPattern(), DstNames, Pattern);
// Scan all of the named values in the destination pattern, rejecting them if
// they don't exist in the input pattern.
for (std::map<std::string, NameRecord>::iterator
I = DstNames.begin(), E = DstNames.end(); I != E; ++I) {
if (SrcNames[I->first].first == nullptr)
      Pattern->error("Pattern has output without matching name in input: $" +
                     I->first);
}
// Scan all of the named values in the source pattern, rejecting them if the
// name isn't used in the dest, and isn't used to tie two values together.
for (std::map<std::string, NameRecord>::iterator
I = SrcNames.begin(), E = SrcNames.end(); I != E; ++I)
if (DstNames[I->first].first == nullptr && SrcNames[I->first].second == 1)
Pattern->error("Pattern has dead named input: $" + I->first);
PatternsToMatch.push_back(PTM);
}
void CodeGenDAGPatterns::InferInstructionFlags() {
const std::vector<const CodeGenInstruction*> &Instructions =
Target.getInstructionsByEnumValue();
// First try to infer flags from the primary instruction pattern, if any.
SmallVector<CodeGenInstruction*, 8> Revisit;
unsigned Errors = 0;
for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
CodeGenInstruction &InstInfo =
const_cast<CodeGenInstruction &>(*Instructions[i]);
// Get the primary instruction pattern.
const TreePattern *Pattern = getInstruction(InstInfo.TheDef).getPattern();
if (!Pattern) {
if (InstInfo.hasUndefFlags())
Revisit.push_back(&InstInfo);
continue;
}
InstAnalyzer PatInfo(*this);
PatInfo.Analyze(Pattern);
Errors += InferFromPattern(InstInfo, PatInfo, InstInfo.TheDef);
}
// Second, look for single-instruction patterns defined outside the
// instruction.
for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
const PatternToMatch &PTM = *I;
// We can only infer from single-instruction patterns, otherwise we won't
// know which instruction should get the flags.
SmallVector<Record*, 8> PatInstrs;
getInstructionsInTree(PTM.getDstPattern(), PatInstrs);
if (PatInstrs.size() != 1)
continue;
// Get the single instruction.
CodeGenInstruction &InstInfo = Target.getInstruction(PatInstrs.front());
// Only infer properties from the first pattern. We'll verify the others.
if (InstInfo.InferredFrom)
continue;
InstAnalyzer PatInfo(*this);
PatInfo.Analyze(&PTM);
Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord());
}
if (Errors)
PrintFatalError("pattern conflicts");
// Revisit instructions with undefined flags and no pattern.
if (Target.guessInstructionProperties()) {
for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
CodeGenInstruction &InstInfo = *Revisit[i];
if (InstInfo.InferredFrom)
continue;
// The mayLoad and mayStore flags default to false.
// Conservatively assume hasSideEffects if it wasn't explicit.
if (InstInfo.hasSideEffects_Unset)
InstInfo.hasSideEffects = true;
}
return;
}
// Complain about any flags that are still undefined.
for (unsigned i = 0, e = Revisit.size(); i != e; ++i) {
CodeGenInstruction &InstInfo = *Revisit[i];
if (InstInfo.InferredFrom)
continue;
if (InstInfo.hasSideEffects_Unset)
PrintError(InstInfo.TheDef->getLoc(),
"Can't infer hasSideEffects from patterns");
if (InstInfo.mayStore_Unset)
PrintError(InstInfo.TheDef->getLoc(),
"Can't infer mayStore from patterns");
if (InstInfo.mayLoad_Unset)
PrintError(InstInfo.TheDef->getLoc(),
"Can't infer mayLoad from patterns");
}
}
/// Verify instruction flags against pattern node properties.
void CodeGenDAGPatterns::VerifyInstructionFlags() {
unsigned Errors = 0;
for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) {
const PatternToMatch &PTM = *I;
SmallVector<Record*, 8> Instrs;
getInstructionsInTree(PTM.getDstPattern(), Instrs);
if (Instrs.empty())
continue;
// Count the number of instructions with each flag set.
unsigned NumSideEffects = 0;
unsigned NumStores = 0;
unsigned NumLoads = 0;
for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
NumSideEffects += InstInfo.hasSideEffects;
NumStores += InstInfo.mayStore;
NumLoads += InstInfo.mayLoad;
}
// Analyze the source pattern.
InstAnalyzer PatInfo(*this);
PatInfo.Analyze(&PTM);
// Collect error messages.
SmallVector<std::string, 4> Msgs;
// Check for missing flags in the output.
// Permit extra flags for now at least.
if (PatInfo.hasSideEffects && !NumSideEffects)
Msgs.push_back("pattern has side effects, but hasSideEffects isn't set");
// Don't verify store flags on instructions with side effects. At least for
// intrinsics, side effects implies mayStore.
if (!PatInfo.hasSideEffects && PatInfo.mayStore && !NumStores)
Msgs.push_back("pattern may store, but mayStore isn't set");
// Similarly, mayStore implies mayLoad on intrinsics.
if (!PatInfo.mayStore && PatInfo.mayLoad && !NumLoads)
Msgs.push_back("pattern may load, but mayLoad isn't set");
// Print error messages.
if (Msgs.empty())
continue;
++Errors;
for (unsigned i = 0, e = Msgs.size(); i != e; ++i)
PrintError(PTM.getSrcRecord()->getLoc(), Twine(Msgs[i]) + " on the " +
(Instrs.size() == 1 ?
"instruction" : "output instructions"));
// Provide the location of the relevant instruction definitions.
for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
if (Instrs[i] != PTM.getSrcRecord())
PrintError(Instrs[i]->getLoc(), "defined here");
const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
if (InstInfo.InferredFrom &&
InstInfo.InferredFrom != InstInfo.TheDef &&
InstInfo.InferredFrom != PTM.getSrcRecord())
        PrintError(InstInfo.InferredFrom->getLoc(), "inferred from pattern");
}
}
if (Errors)
PrintFatalError("Errors in DAG patterns");
}
/// Given a pattern result with an unresolved type, see if we can find one
/// instruction with an unresolved result type. Force that result type to an
/// arbitrary element if doing so makes it possible for the types to converge.
static bool ForceArbitraryInstResultType(TreePatternNode *N, TreePattern &TP) {
if (N->isLeaf())
return false;
// Analyze children.
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
if (ForceArbitraryInstResultType(N->getChild(i), TP))
return true;
if (!N->getOperator()->isSubClassOf("Instruction"))
return false;
// If this type is already concrete or completely unknown we can't do
// anything.
for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i) {
if (N->getExtType(i).isCompletelyUnknown() || N->getExtType(i).isConcrete())
continue;
// Otherwise, force its type to the first possibility (an arbitrary choice).
if (N->getExtType(i).MergeInTypeInfo(N->getExtType(i).getTypeList()[0], TP))
return true;
}
return false;
}
void CodeGenDAGPatterns::ParsePatterns() {
std::vector<Record*> Patterns = Records.getAllDerivedDefinitions("Pattern");
for (unsigned i = 0, e = Patterns.size(); i != e; ++i) {
Record *CurPattern = Patterns[i];
DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
// If the pattern references the null_frag, there's nothing to do.
if (hasNullFragReference(Tree))
continue;
TreePattern *Pattern = new TreePattern(CurPattern, Tree, true, *this);
// Inline pattern fragments into it.
Pattern->InlinePatternFragments();
ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
if (LI->empty()) continue; // no pattern.
// Parse the instruction.
TreePattern Result(CurPattern, LI, false, *this);
// Inline pattern fragments into it.
Result.InlinePatternFragments();
if (Result.getNumTrees() != 1)
Result.error("Cannot handle instructions producing instructions "
"with temporaries yet!");
bool IterateInference;
bool InferredAllPatternTypes, InferredAllResultTypes;
do {
// Infer as many types as possible. If we cannot infer all of them, we
// can never do anything with this pattern: report it to the user.
InferredAllPatternTypes =
Pattern->InferAllTypes(&Pattern->getNamedNodesMap());
// Infer as many types as possible. If we cannot infer all of them, we
// can never do anything with this pattern: report it to the user.
InferredAllResultTypes =
Result.InferAllTypes(&Pattern->getNamedNodesMap());
IterateInference = false;
// Apply the type of the result to the source pattern. This helps us
// resolve cases where the input type is known to be a pointer type (which
// is considered resolved), but the result knows it needs to be 32- or
// 64-bits. Infer the other way for good measure.
for (unsigned i = 0, e = std::min(Result.getTree(0)->getNumTypes(),
Pattern->getTree(0)->getNumTypes());
i != e; ++i) {
IterateInference = Pattern->getTree(0)->UpdateNodeType(
i, Result.getTree(0)->getExtType(i), Result);
IterateInference |= Result.getTree(0)->UpdateNodeType(
i, Pattern->getTree(0)->getExtType(i), Result);
}
// If our iteration has converged and the input pattern's types are fully
// resolved but the result pattern is not fully resolved, we may have a
// situation where we have two instructions in the result pattern and
// the instructions require a common register class, but don't care about
// what actual MVT is used. This is actually a bug in our modelling:
// output patterns should have register classes, not MVTs.
//
// In any case, to handle this, we just go through and disambiguate some
// arbitrary types to the result pattern's nodes.
if (!IterateInference && InferredAllPatternTypes &&
!InferredAllResultTypes)
IterateInference =
ForceArbitraryInstResultType(Result.getTree(0), Result);
} while (IterateInference);
// Verify that we inferred enough types that we can do something with the
// pattern and result. If these fire the user has to add type casts.
if (!InferredAllPatternTypes)
Pattern->error("Could not infer all types in pattern!");
if (!InferredAllResultTypes) {
Pattern->dump();
Result.error("Could not infer all types in pattern result!");
}
// Validate that the input pattern is correct.
std::map<std::string, TreePatternNode*> InstInputs;
std::map<std::string, TreePatternNode*> InstResults;
std::vector<Record*> InstImpResults;
for (unsigned j = 0, ee = Pattern->getNumTrees(); j != ee; ++j)
FindPatternInputsAndOutputs(Pattern, Pattern->getTree(j),
InstInputs, InstResults,
InstImpResults);
// Promote the xform function to be an explicit node if set.
TreePatternNode *DstPattern = Result.getOnlyTree();
std::vector<TreePatternNode*> ResultNodeOperands;
for (unsigned ii = 0, ee = DstPattern->getNumChildren(); ii != ee; ++ii) {
TreePatternNode *OpNode = DstPattern->getChild(ii);
if (Record *Xform = OpNode->getTransformFn()) {
OpNode->setTransformFn(nullptr);
std::vector<TreePatternNode*> Children;
Children.push_back(OpNode);
OpNode = new TreePatternNode(Xform, Children, OpNode->getNumTypes());
}
ResultNodeOperands.push_back(OpNode);
}
DstPattern = Result.getOnlyTree();
if (!DstPattern->isLeaf())
DstPattern = new TreePatternNode(DstPattern->getOperator(),
ResultNodeOperands,
DstPattern->getNumTypes());
for (unsigned i = 0, e = Result.getOnlyTree()->getNumTypes(); i != e; ++i)
DstPattern->setType(i, Result.getOnlyTree()->getExtType(i));
TreePattern Temp(Result.getRecord(), DstPattern, false, *this);
Temp.InferAllTypes();
AddPatternToMatch(Pattern,
PatternToMatch(CurPattern,
CurPattern->getValueAsListInit("Predicates"),
Pattern->getTree(0),
Temp.getOnlyTree(), InstImpResults,
CurPattern->getValueAsInt("AddedComplexity"),
CurPattern->getID()));
}
}
/// CombineChildVariants - Given a bunch of permutations of each child of the
/// 'operator' node, put them together in all possible ways.
static void CombineChildVariants(TreePatternNode *Orig,
const std::vector<std::vector<TreePatternNode*> > &ChildVariants,
std::vector<TreePatternNode*> &OutVariants,
CodeGenDAGPatterns &CDP,
const MultipleUseVarSet &DepVars) {
// Make sure that each operand has at least one variant to choose from.
for (unsigned i = 0, e = ChildVariants.size(); i != e; ++i)
if (ChildVariants[i].empty())
return;
// The end result is an all-pairs construction of the resultant pattern.
std::vector<unsigned> Idxs;
Idxs.resize(ChildVariants.size());
bool NotDone;
do {
#ifndef NDEBUG
DEBUG(if (!Idxs.empty()) {
errs() << Orig->getOperator()->getName() << ": Idxs = [ ";
for (unsigned i = 0; i < Idxs.size(); ++i) {
errs() << Idxs[i] << " ";
}
errs() << "]\n";
});
#endif
// Create the variant and add it to the output list.
std::vector<TreePatternNode*> NewChildren;
for (unsigned i = 0, e = ChildVariants.size(); i != e; ++i)
NewChildren.push_back(ChildVariants[i][Idxs[i]]);
TreePatternNode *R = new TreePatternNode(Orig->getOperator(), NewChildren,
Orig->getNumTypes());
// Copy over properties.
R->setName(Orig->getName());
R->setPredicateFns(Orig->getPredicateFns());
R->setTransformFn(Orig->getTransformFn());
for (unsigned i = 0, e = Orig->getNumTypes(); i != e; ++i)
R->setType(i, Orig->getExtType(i));
// If this pattern cannot match, do not include it as a variant.
std::string ErrString;
if (!R->canPatternMatch(ErrString, CDP)) {
delete R;
} else {
bool AlreadyExists = false;
// Scan to see if this pattern has already been emitted. We can get
// duplication due to things like commuting:
// (and GPRC:$a, GPRC:$b) -> (and GPRC:$b, GPRC:$a)
// which are the same pattern. Ignore the dups.
for (unsigned i = 0, e = OutVariants.size(); i != e; ++i)
if (R->isIsomorphicTo(OutVariants[i], DepVars)) {
AlreadyExists = true;
break;
}
if (AlreadyExists)
delete R;
else
OutVariants.push_back(R);
}
// Increment indices to the next permutation by incrementing the
    // indices from the last index backward, e.g., generate the sequence
// [0, 0], [0, 1], [1, 0], [1, 1].
int IdxsIdx;
for (IdxsIdx = Idxs.size() - 1; IdxsIdx >= 0; --IdxsIdx) {
if (++Idxs[IdxsIdx] == ChildVariants[IdxsIdx].size())
Idxs[IdxsIdx] = 0;
else
break;
}
NotDone = (IdxsIdx >= 0);
} while (NotDone);
}
/// CombineChildVariants - A helper function for binary operators.
///
static void CombineChildVariants(TreePatternNode *Orig,
const std::vector<TreePatternNode*> &LHS,
const std::vector<TreePatternNode*> &RHS,
std::vector<TreePatternNode*> &OutVariants,
CodeGenDAGPatterns &CDP,
const MultipleUseVarSet &DepVars) {
std::vector<std::vector<TreePatternNode*> > ChildVariants;
ChildVariants.push_back(LHS);
ChildVariants.push_back(RHS);
CombineChildVariants(Orig, ChildVariants, OutVariants, CDP, DepVars);
}
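/// GatherChildrenOfAssociativeOpcode - Flatten nested applications of the same
/// associative operator into a single list of children, stopping at nodes that
/// carry a name, predicate, or transform function.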
static void GatherChildrenOfAssociativeOpcode(TreePatternNode *N,
std::vector<TreePatternNode *> &Children) {
  assert(N->getNumChildren() == 2 &&
         "Associative but doesn't have 2 children!");
Record *Operator = N->getOperator();
// Only permit raw nodes.
if (!N->getName().empty() || !N->getPredicateFns().empty() ||
N->getTransformFn()) {
Children.push_back(N);
return;
}
if (N->getChild(0)->isLeaf() || N->getChild(0)->getOperator() != Operator)
Children.push_back(N->getChild(0));
else
GatherChildrenOfAssociativeOpcode(N->getChild(0), Children);
if (N->getChild(1)->isLeaf() || N->getChild(1)->getOperator() != Operator)
Children.push_back(N->getChild(1));
else
GatherChildrenOfAssociativeOpcode(N->getChild(1), Children);
}
/// GenerateVariantsOf - Given a pattern N, generate all permutations we can of
/// the (potentially recursive) pattern by using algebraic laws.
///
static void GenerateVariantsOf(TreePatternNode *N,
std::vector<TreePatternNode*> &OutVariants,
CodeGenDAGPatterns &CDP,
const MultipleUseVarSet &DepVars) {
// We cannot permute leaves or ComplexPattern uses.
if (N->isLeaf() || N->getOperator()->isSubClassOf("ComplexPattern")) {
OutVariants.push_back(N);
return;
}
// Look up interesting info about the node.
const SDNodeInfo &NodeInfo = CDP.getSDNodeInfo(N->getOperator());
// If this node is associative, re-associate.
if (NodeInfo.hasProperty(SDNPAssociative)) {
// Re-associate by pulling together all of the linked operators
std::vector<TreePatternNode*> MaximalChildren;
GatherChildrenOfAssociativeOpcode(N, MaximalChildren);
// Only handle child sizes of 3. Otherwise we'll end up trying too many
// permutations.
if (MaximalChildren.size() == 3) {
// Find the variants of all of our maximal children.
std::vector<TreePatternNode*> AVariants, BVariants, CVariants;
GenerateVariantsOf(MaximalChildren[0], AVariants, CDP, DepVars);
GenerateVariantsOf(MaximalChildren[1], BVariants, CDP, DepVars);
GenerateVariantsOf(MaximalChildren[2], CVariants, CDP, DepVars);
// There are only two ways we can permute the tree:
// (A op B) op C and A op (B op C)
// Within these forms, we can also permute A/B/C.
// Generate legal pair permutations of A/B/C.
std::vector<TreePatternNode*> ABVariants;
std::vector<TreePatternNode*> BAVariants;
std::vector<TreePatternNode*> ACVariants;
std::vector<TreePatternNode*> CAVariants;
std::vector<TreePatternNode*> BCVariants;
std::vector<TreePatternNode*> CBVariants;
CombineChildVariants(N, AVariants, BVariants, ABVariants, CDP, DepVars);
CombineChildVariants(N, BVariants, AVariants, BAVariants, CDP, DepVars);
CombineChildVariants(N, AVariants, CVariants, ACVariants, CDP, DepVars);
CombineChildVariants(N, CVariants, AVariants, CAVariants, CDP, DepVars);
CombineChildVariants(N, BVariants, CVariants, BCVariants, CDP, DepVars);
CombineChildVariants(N, CVariants, BVariants, CBVariants, CDP, DepVars);
// Combine those into the result: (x op x) op x
CombineChildVariants(N, ABVariants, CVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, BAVariants, CVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, ACVariants, BVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, CAVariants, BVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, BCVariants, AVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, CBVariants, AVariants, OutVariants, CDP, DepVars);
// Combine those into the result: x op (x op x)
CombineChildVariants(N, CVariants, ABVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, CVariants, BAVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, BVariants, ACVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, BVariants, CAVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, AVariants, BCVariants, OutVariants, CDP, DepVars);
CombineChildVariants(N, AVariants, CBVariants, OutVariants, CDP, DepVars);
return;
}
}
// Compute permutations of all children.
std::vector<std::vector<TreePatternNode*> > ChildVariants;
ChildVariants.resize(N->getNumChildren());
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
GenerateVariantsOf(N->getChild(i), ChildVariants[i], CDP, DepVars);
// Build all permutations based on how the children were formed.
CombineChildVariants(N, ChildVariants, OutVariants, CDP, DepVars);
// If this node is commutative, consider the commuted order.
bool isCommIntrinsic = N->isCommutativeIntrinsic(CDP);
if (NodeInfo.hasProperty(SDNPCommutative) || isCommIntrinsic) {
assert((N->getNumChildren()==2 || isCommIntrinsic) &&
"Commutative but doesn't have 2 children!");
// Don't count children which are actually register references.
unsigned NC = 0;
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = N->getChild(i);
if (Child->isLeaf())
if (DefInit *DI = dyn_cast<DefInit>(Child->getLeafValue())) {
Record *RR = DI->getDef();
if (RR->isSubClassOf("Register"))
continue;
}
NC++;
}
// Consider the commuted order.
if (isCommIntrinsic) {
// Commutative intrinsic. First operand is the intrinsic id, 2nd and 3rd
// operands are the commutative operands, and there might be more operands
// after those.
      assert(NC >= 3 &&
             "Commutative intrinsic should have at least 3 children!");
std::vector<std::vector<TreePatternNode*> > Variants;
Variants.push_back(ChildVariants[0]); // Intrinsic id.
Variants.push_back(ChildVariants[2]);
Variants.push_back(ChildVariants[1]);
for (unsigned i = 3; i != NC; ++i)
Variants.push_back(ChildVariants[i]);
CombineChildVariants(N, Variants, OutVariants, CDP, DepVars);
} else if (NC == 2)
CombineChildVariants(N, ChildVariants[1], ChildVariants[0],
OutVariants, CDP, DepVars);
}
}
// GenerateVariants - Generate variants. For example, commutative patterns can
// match multiple ways. Add them to PatternsToMatch as well.
void CodeGenDAGPatterns::GenerateVariants() {
DEBUG(errs() << "Generating instruction variants.\n");
// Loop over all of the patterns we've collected, checking to see if we can
// generate variants of the instruction, through the exploitation of
// identities. This permits the target to provide aggressive matching without
// the .td file having to contain tons of variants of instructions.
//
// Note that this loop adds new patterns to the PatternsToMatch list, but we
// intentionally do not reconsider these. Any variants of added patterns have
// already been added.
//
for (unsigned i = 0, e = PatternsToMatch.size(); i != e; ++i) {
MultipleUseVarSet DepVars;
std::vector<TreePatternNode*> Variants;
FindDepVars(PatternsToMatch[i].getSrcPattern(), DepVars);
DEBUG(errs() << "Dependent/multiply used variables: ");
DEBUG(DumpDepVars(DepVars));
DEBUG(errs() << "\n");
GenerateVariantsOf(PatternsToMatch[i].getSrcPattern(), Variants, *this,
DepVars);
assert(!Variants.empty() && "Must create at least original variant!");
Variants.erase(Variants.begin()); // Remove the original pattern.
if (Variants.empty()) // No variants for this pattern.
continue;
DEBUG(errs() << "FOUND VARIANTS OF: ";
PatternsToMatch[i].getSrcPattern()->dump();
errs() << "\n");
for (unsigned v = 0, e = Variants.size(); v != e; ++v) {
TreePatternNode *Variant = Variants[v];
DEBUG(errs() << " VAR#" << v << ": ";
Variant->dump();
errs() << "\n");
// Scan to see if an instruction or explicit pattern already matches this.
bool AlreadyExists = false;
for (unsigned p = 0, e = PatternsToMatch.size(); p != e; ++p) {
// Skip if the top level predicates do not match.
if (PatternsToMatch[i].getPredicates() !=
PatternsToMatch[p].getPredicates())
continue;
// Check to see if this variant already exists.
if (Variant->isIsomorphicTo(PatternsToMatch[p].getSrcPattern(),
DepVars)) {
DEBUG(errs() << " *** ALREADY EXISTS, ignoring variant.\n");
AlreadyExists = true;
break;
}
}
// If we already have it, ignore the variant.
if (AlreadyExists) continue;
// Otherwise, add it to the list of patterns we have.
PatternsToMatch.emplace_back(
PatternsToMatch[i].getSrcRecord(), PatternsToMatch[i].getPredicates(),
Variant, PatternsToMatch[i].getDstPattern(),
PatternsToMatch[i].getDstRegs(),
PatternsToMatch[i].getAddedComplexity(), Record::getNewUID());
}
DEBUG(errs() << "\n");
}
}
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CallingConvEmitter.cpp | //===- CallingConvEmitter.cpp - Generate calling conventions --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting descriptions of the calling
// conventions supported by this target.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <cassert>
using namespace llvm;
namespace {
class CallingConvEmitter {
RecordKeeper &Records;
public:
explicit CallingConvEmitter(RecordKeeper &R) : Records(R) {}
void run(raw_ostream &o);
private:
void EmitCallingConv(Record *CC, raw_ostream &O);
void EmitAction(Record *Action, unsigned Indent, raw_ostream &O);
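  // Counter - Running number used to give the emitted static register lists
  // and stack offset variables unique names within a calling convention.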
unsigned Counter;
};
} // End anonymous namespace
void CallingConvEmitter::run(raw_ostream &O) {
std::vector<Record*> CCs = Records.getAllDerivedDefinitions("CallingConv");
  // Emit prototypes for all of the non-custom CCs so that they can forward
  // reference each other.
for (unsigned i = 0, e = CCs.size(); i != e; ++i) {
if (!CCs[i]->getValueAsBit("Custom")) {
O << "static bool " << CCs[i]->getName()
<< "(unsigned ValNo, MVT ValVT,\n"
<< std::string(CCs[i]->getName().size() + 13, ' ')
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
<< std::string(CCs[i]->getName().size() + 13, ' ')
<< "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
}
}
// Emit each non-custom calling convention description in full.
for (unsigned i = 0, e = CCs.size(); i != e; ++i) {
if (!CCs[i]->getValueAsBit("Custom"))
EmitCallingConv(CCs[i], O);
}
}
void CallingConvEmitter::EmitCallingConv(Record *CC, raw_ostream &O) {
ListInit *CCActions = CC->getValueAsListInit("Actions");
Counter = 0;
O << "\n\nstatic bool " << CC->getName()
<< "(unsigned ValNo, MVT ValVT,\n"
<< std::string(CC->getName().size()+13, ' ')
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
<< std::string(CC->getName().size()+13, ' ')
<< "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
// Emit all of the actions, in order.
for (unsigned i = 0, e = CCActions->size(); i != e; ++i) {
O << "\n";
EmitAction(CCActions->getElementAsRecord(i), 2, O);
}
O << "\n return true; // CC didn't match.\n";
O << "}\n";
}
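/// EmitAction - Emit the C++ code implementing a single calling convention
/// action record at the given indentation. Predicate actions emit an 'if'
/// around their sub-action; the other actions emit register/stack allocation
/// or type promotion code directly.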
void CallingConvEmitter::EmitAction(Record *Action,
unsigned Indent, raw_ostream &O) {
std::string IndentStr = std::string(Indent, ' ');
if (Action->isSubClassOf("CCPredicateAction")) {
O << IndentStr << "if (";
if (Action->isSubClassOf("CCIfType")) {
ListInit *VTs = Action->getValueAsListInit("VTs");
for (unsigned i = 0, e = VTs->size(); i != e; ++i) {
Record *VT = VTs->getElementAsRecord(i);
if (i != 0) O << " ||\n " << IndentStr;
O << "LocVT == " << getEnumName(getValueType(VT));
}
} else if (Action->isSubClassOf("CCIf")) {
O << Action->getValueAsString("Predicate");
} else {
Action->dump();
PrintFatalError("Unknown CCPredicateAction!");
}
O << ") {\n";
EmitAction(Action->getValueAsDef("SubAction"), Indent+2, O);
O << IndentStr << "}\n";
} else {
if (Action->isSubClassOf("CCDelegateTo")) {
Record *CC = Action->getValueAsDef("CC");
O << IndentStr << "if (!" << CC->getName()
<< "(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))\n"
<< IndentStr << " return false;\n";
} else if (Action->isSubClassOf("CCAssignToReg")) {
ListInit *RegList = Action->getValueAsListInit("RegList");
if (RegList->size() == 1) {
O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
O << getQualifiedName(RegList->getElementAsRecord(0)) << ")) {\n";
} else {
O << IndentStr << "static const MCPhysReg RegList" << ++Counter
<< "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = RegList->size(); i != e; ++i) {
if (i != 0) O << ", ";
O << getQualifiedName(RegList->getElementAsRecord(i));
}
O << "\n" << IndentStr << "};\n";
O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList"
<< Counter << ")) {\n";
}
O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, "
<< "Reg, LocVT, LocInfo));\n";
O << IndentStr << " return false;\n";
O << IndentStr << "}\n";
} else if (Action->isSubClassOf("CCAssignToRegWithShadow")) {
ListInit *RegList = Action->getValueAsListInit("RegList");
ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
if (!ShadowRegList->empty() && ShadowRegList->size() != RegList->size())
PrintFatalError("Invalid length of list of shadowed registers");
if (RegList->size() == 1) {
O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
O << getQualifiedName(RegList->getElementAsRecord(0));
O << ", " << getQualifiedName(ShadowRegList->getElementAsRecord(0));
O << ")) {\n";
} else {
unsigned RegListNumber = ++Counter;
unsigned ShadowRegListNumber = ++Counter;
O << IndentStr << "static const MCPhysReg RegList" << RegListNumber
<< "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = RegList->size(); i != e; ++i) {
if (i != 0) O << ", ";
O << getQualifiedName(RegList->getElementAsRecord(i));
}
O << "\n" << IndentStr << "};\n";
O << IndentStr << "static const MCPhysReg RegList"
<< ShadowRegListNumber << "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = ShadowRegList->size(); i != e; ++i) {
if (i != 0) O << ", ";
O << getQualifiedName(ShadowRegList->getElementAsRecord(i));
}
O << "\n" << IndentStr << "};\n";
O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList"
<< RegListNumber << ", " << "RegList" << ShadowRegListNumber
<< ")) {\n";
}
O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, "
<< "Reg, LocVT, LocInfo));\n";
O << IndentStr << " return false;\n";
O << IndentStr << "}\n";
} else if (Action->isSubClassOf("CCAssignToStack")) {
int Size = Action->getValueAsInt("Size");
int Align = Action->getValueAsInt("Align");
O << IndentStr << "unsigned Offset" << ++Counter
<< " = State.AllocateStack(";
if (Size)
O << Size << ", ";
else
O << "\n" << IndentStr
<< " State.getMachineFunction().getTarget().getDataLayout()"
"->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())),"
" ";
if (Align)
O << Align;
else
O << "\n" << IndentStr
<< " State.getMachineFunction().getTarget().getDataLayout()"
"->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()"
"))";
O << ");\n" << IndentStr
<< "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
<< Counter << ", LocVT, LocInfo));\n";
O << IndentStr << "return false;\n";
} else if (Action->isSubClassOf("CCAssignToStackWithShadow")) {
int Size = Action->getValueAsInt("Size");
int Align = Action->getValueAsInt("Align");
ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
unsigned ShadowRegListNumber = ++Counter;
O << IndentStr << "static const MCPhysReg ShadowRegList"
<< ShadowRegListNumber << "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = ShadowRegList->size(); i != e; ++i) {
if (i != 0) O << ", ";
O << getQualifiedName(ShadowRegList->getElementAsRecord(i));
}
O << "\n" << IndentStr << "};\n";
O << IndentStr << "unsigned Offset" << ++Counter
<< " = State.AllocateStack("
<< Size << ", " << Align << ", "
<< "ShadowRegList" << ShadowRegListNumber << ");\n";
O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
<< Counter << ", LocVT, LocInfo));\n";
O << IndentStr << "return false;\n";
} else if (Action->isSubClassOf("CCPromoteToType")) {
Record *DestTy = Action->getValueAsDef("DestTy");
MVT::SimpleValueType DestVT = getValueType(DestTy);
O << IndentStr << "LocVT = " << getEnumName(DestVT) <<";\n";
if (MVT(DestVT).isFloatingPoint()) {
O << IndentStr << "LocInfo = CCValAssign::FPExt;\n";
} else {
O << IndentStr << "if (ArgFlags.isSExt())\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::SExt;\n"
<< IndentStr << "else if (ArgFlags.isZExt())\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::ZExt;\n"
<< IndentStr << "else\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::AExt;\n";
}
} else if (Action->isSubClassOf("CCPromoteToUpperBitsInType")) {
Record *DestTy = Action->getValueAsDef("DestTy");
MVT::SimpleValueType DestVT = getValueType(DestTy);
O << IndentStr << "LocVT = " << getEnumName(DestVT) << ";\n";
if (MVT(DestVT).isFloatingPoint()) {
PrintFatalError("CCPromoteToUpperBitsInType does not handle floating "
"point");
} else {
O << IndentStr << "if (ArgFlags.isSExt())\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::SExtUpper;\n"
<< IndentStr << "else if (ArgFlags.isZExt())\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::ZExtUpper;\n"
<< IndentStr << "else\n"
<< IndentStr << IndentStr << "LocInfo = CCValAssign::AExtUpper;\n";
}
} else if (Action->isSubClassOf("CCBitConvertToType")) {
Record *DestTy = Action->getValueAsDef("DestTy");
O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
O << IndentStr << "LocInfo = CCValAssign::BCvt;\n";
} else if (Action->isSubClassOf("CCPassIndirect")) {
Record *DestTy = Action->getValueAsDef("DestTy");
O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
O << IndentStr << "LocInfo = CCValAssign::Indirect;\n";
} else if (Action->isSubClassOf("CCPassByVal")) {
int Size = Action->getValueAsInt("Size");
int Align = Action->getValueAsInt("Align");
O << IndentStr
<< "State.HandleByVal(ValNo, ValVT, LocVT, LocInfo, "
<< Size << ", " << Align << ", ArgFlags);\n";
O << IndentStr << "return false;\n";
} else if (Action->isSubClassOf("CCCustom")) {
O << IndentStr
<< "if (" << Action->getValueAsString("FuncName") << "(ValNo, ValVT, "
<< "LocVT, LocInfo, ArgFlags, State))\n";
O << IndentStr << IndentStr << "return false;\n";
} else {
Action->dump();
PrintFatalError("Unknown CCAction!");
}
}
}
namespace llvm {
void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS) {
emitSourceFileHeader("Calling Convention Implementation Fragment", OS);
CallingConvEmitter(RK).run(OS);
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/CodeGenTarget.h | //===- CodeGenTarget.h - Target Class Wrapper -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines wrappers for the Target class and related global
// functionality. This makes it easier to access the data and provides a single
// place that needs to check it for validity. All of these classes abort
// on error conditions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
#define LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
#include "CodeGenInstruction.h"
#include "CodeGenRegisters.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Record.h"
#include <algorithm>
namespace llvm {
struct CodeGenRegister;
class CodeGenSchedModels;
class CodeGenTarget;
// SelectionDAG node properties.
// SDNPMemOperand: indicates that a node touches memory and therefore must
// have an associated memory operand that describes the access.
enum SDNP {
SDNPCommutative,
SDNPAssociative,
SDNPHasChain,
SDNPOutGlue,
SDNPInGlue,
SDNPOptInGlue,
SDNPMayLoad,
SDNPMayStore,
SDNPSideEffect,
SDNPMemOperand,
SDNPVariadic,
SDNPWantRoot,
SDNPWantParent
};
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
/// record corresponds to.
MVT::SimpleValueType getValueType(Record *Rec);
std::string getName(MVT::SimpleValueType T);
std::string getEnumName(MVT::SimpleValueType T);
/// getQualifiedName - Return the name of the specified record, with a
/// namespace qualifier if the record contains one.
std::string getQualifiedName(const Record *R);
/// CodeGenTarget - This class corresponds to the Target class in the .td files.
///
class CodeGenTarget {
RecordKeeper &Records;
Record *TargetRec;
mutable DenseMap<const Record*,
std::unique_ptr<CodeGenInstruction>> Instructions;
mutable std::unique_ptr<CodeGenRegBank> RegBank;
mutable std::vector<Record*> RegAltNameIndices;
mutable SmallVector<MVT::SimpleValueType, 8> LegalValueTypes;
void ReadRegAltNameIndices() const;
void ReadInstructions() const;
void ReadLegalValueTypes() const;
mutable std::unique_ptr<CodeGenSchedModels> SchedModels;
mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
public:
CodeGenTarget(RecordKeeper &Records);
~CodeGenTarget();
Record *getTargetRecord() const { return TargetRec; }
const std::string &getName() const;
/// getInstNamespace - Return the target-specific instruction namespace.
///
std::string getInstNamespace() const;
/// getInstructionSet - Return the InstructionSet object.
///
Record *getInstructionSet() const;
/// getAsmParser - Return the AssemblyParser definition for this target.
///
Record *getAsmParser() const;
/// getAsmParserVariant - Return the AssemblyParserVariant definition for
/// this target.
///
Record *getAsmParserVariant(unsigned i) const;
/// getAsmParserVariantCount - Return the number of AssemblyParserVariant
/// definitions available for this target.
///
unsigned getAsmParserVariantCount() const;
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
Record *getAsmWriter() const;
/// getRegBank - Return the register bank description.
CodeGenRegBank &getRegBank() const;
/// getRegisterByName - If there is a register with the specific AsmName,
/// return it.
const CodeGenRegister *getRegisterByName(StringRef Name) const;
const std::vector<Record*> &getRegAltNameIndices() const {
if (RegAltNameIndices.empty()) ReadRegAltNameIndices();
return RegAltNameIndices;
}
const CodeGenRegisterClass &getRegisterClass(Record *R) const {
return *getRegBank().getRegClass(R);
}
/// getRegisterVTs - Find the union of all possible SimpleValueTypes for the
/// specified physical register.
std::vector<MVT::SimpleValueType> getRegisterVTs(Record *R) const;
ArrayRef<MVT::SimpleValueType> getLegalValueTypes() const {
if (LegalValueTypes.empty()) ReadLegalValueTypes();
return LegalValueTypes;
}
/// isLegalValueType - Return true if the specified value type is natively
/// supported by the target (i.e. there are registers that directly hold it).
bool isLegalValueType(MVT::SimpleValueType VT) const {
ArrayRef<MVT::SimpleValueType> LegalVTs = getLegalValueTypes();
for (unsigned i = 0, e = LegalVTs.size(); i != e; ++i)
if (LegalVTs[i] == VT) return true;
return false;
}
CodeGenSchedModels &getSchedModels() const;
private:
DenseMap<const Record*, std::unique_ptr<CodeGenInstruction>> &
getInstructions() const {
if (Instructions.empty()) ReadInstructions();
return Instructions;
}
public:
CodeGenInstruction &getInstruction(const Record *InstRec) const {
if (Instructions.empty()) ReadInstructions();
auto I = Instructions.find(InstRec);
assert(I != Instructions.end() && "Not an instruction");
return *I->second;
}
/// getInstructionsByEnumValue - Return all of the instructions defined by the
/// target, ordered by their enum value.
const std::vector<const CodeGenInstruction*> &
getInstructionsByEnumValue() const {
if (InstrsByEnum.empty()) ComputeInstrsByEnum();
return InstrsByEnum;
}
typedef std::vector<const CodeGenInstruction*>::const_iterator inst_iterator;
inst_iterator inst_begin() const{return getInstructionsByEnumValue().begin();}
inst_iterator inst_end() const { return getInstructionsByEnumValue().end(); }
iterator_range<inst_iterator> instructions() const {
return iterator_range<inst_iterator>(inst_begin(), inst_end());
}
/// isLittleEndianEncoding - are instruction bit patterns defined as [0..n]?
///
bool isLittleEndianEncoding() const;
/// reverseBitsForLittleEndianEncoding - For little-endian instruction bit
/// encodings, reverse the bit order of all instructions.
void reverseBitsForLittleEndianEncoding();
/// guessInstructionProperties - should we just guess unset instruction
/// properties?
bool guessInstructionProperties() const;
private:
void ComputeInstrsByEnum() const;
};
/// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern
/// tablegen class in TargetSelectionDAG.td
class ComplexPattern {
MVT::SimpleValueType Ty;
unsigned NumOperands;
std::string SelectFunc;
std::vector<Record*> RootNodes;
unsigned Properties; // Node properties
public:
ComplexPattern() : NumOperands(0) {}
ComplexPattern(Record *R);
MVT::SimpleValueType getValueType() const { return Ty; }
unsigned getNumOperands() const { return NumOperands; }
const std::string &getSelectFunc() const { return SelectFunc; }
const std::vector<Record*> &getRootNodes() const {
return RootNodes;
}
bool hasProperty(enum SDNP Prop) const { return Properties & (1 << Prop); }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/X86RecognizableInstr.cpp | //===- X86RecognizableInstr.cpp - Disassembler instruction spec --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler Emitter.
// It contains the implementation of a single recognizable instruction.
// Documentation for the disassembler emitter in general can be found in
// X86DisasemblerEmitter.h.
//
//===----------------------------------------------------------------------===//
#include "X86RecognizableInstr.h"
#include "X86DisassemblerShared.h"
#include "X86ModRMFilters.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
using namespace llvm;
#define MRM_MAPPING \
MAP(C0, 32) \
MAP(C1, 33) \
MAP(C2, 34) \
MAP(C3, 35) \
MAP(C4, 36) \
MAP(C5, 37) \
MAP(C6, 38) \
MAP(C7, 39) \
MAP(C8, 40) \
MAP(C9, 41) \
MAP(CA, 42) \
MAP(CB, 43) \
MAP(CC, 44) \
MAP(CD, 45) \
MAP(CE, 46) \
MAP(CF, 47) \
MAP(D0, 48) \
MAP(D1, 49) \
MAP(D2, 50) \
MAP(D3, 51) \
MAP(D4, 52) \
MAP(D5, 53) \
MAP(D6, 54) \
MAP(D7, 55) \
MAP(D8, 56) \
MAP(D9, 57) \
MAP(DA, 58) \
MAP(DB, 59) \
MAP(DC, 60) \
MAP(DD, 61) \
MAP(DE, 62) \
MAP(DF, 63) \
MAP(E0, 64) \
MAP(E1, 65) \
MAP(E2, 66) \
MAP(E3, 67) \
MAP(E4, 68) \
MAP(E5, 69) \
MAP(E6, 70) \
MAP(E7, 71) \
MAP(E8, 72) \
MAP(E9, 73) \
MAP(EA, 74) \
MAP(EB, 75) \
MAP(EC, 76) \
MAP(ED, 77) \
MAP(EE, 78) \
MAP(EF, 79) \
MAP(F0, 80) \
MAP(F1, 81) \
MAP(F2, 82) \
MAP(F3, 83) \
MAP(F4, 84) \
MAP(F5, 85) \
MAP(F6, 86) \
MAP(F7, 87) \
MAP(F8, 88) \
MAP(F9, 89) \
MAP(FA, 90) \
MAP(FB, 91) \
MAP(FC, 92) \
MAP(FD, 93) \
MAP(FE, 94) \
MAP(FF, 95)
// A clone of X86 since we can't depend on something that is generated.
namespace X86Local {
enum {
Pseudo = 0,
RawFrm = 1,
AddRegFrm = 2,
MRMDestReg = 3,
MRMDestMem = 4,
MRMSrcReg = 5,
MRMSrcMem = 6,
RawFrmMemOffs = 7,
RawFrmSrc = 8,
RawFrmDst = 9,
RawFrmDstSrc = 10,
RawFrmImm8 = 11,
RawFrmImm16 = 12,
MRMXr = 14, MRMXm = 15,
MRM0r = 16, MRM1r = 17, MRM2r = 18, MRM3r = 19,
MRM4r = 20, MRM5r = 21, MRM6r = 22, MRM7r = 23,
MRM0m = 24, MRM1m = 25, MRM2m = 26, MRM3m = 27,
MRM4m = 28, MRM5m = 29, MRM6m = 30, MRM7m = 31,
#define MAP(from, to) MRM_##from = to,
MRM_MAPPING
#undef MAP
lastMRM
};
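// Opcode maps: the one-byte map, the two-byte (0x0F) map, the three-byte
// 0x0F 0x38 and 0x0F 0x3A maps, and the XOP8/XOP9/XOPA maps.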
enum {
OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6
};
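// Mandatory opcode prefixes: PS = no prefix, PD = 0x66, XS = 0xF3, XD = 0xF2.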
enum {
PS = 1, PD = 2, XS = 3, XD = 4
};
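// Opcode encodings other than legacy: VEX, XOP, and EVEX.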
enum {
VEX = 1, XOP = 2, EVEX = 3
};
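// Operand-size attribute: 16-bit or 32-bit, if the instruction requires one.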
enum {
OpSize16 = 1, OpSize32 = 2
};
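// Address-size attribute: 16-, 32-, or 64-bit, if the instruction requires one.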
enum {
AdSize16 = 1, AdSize32 = 2, AdSize64 = 3
};
}
using namespace X86Disassembler;
/// isRegFormat - Indicates whether a particular form requires the Mod field of
/// the ModR/M byte to be 0b11.
///
/// @param form - The form of the instruction.
/// @return - true if the form implies that Mod must be 0b11, false
/// otherwise.
static bool isRegFormat(uint8_t form) {
return (form == X86Local::MRMDestReg ||
form == X86Local::MRMSrcReg ||
form == X86Local::MRMXr ||
(form >= X86Local::MRM0r && form <= X86Local::MRM7r));
}
/// byteFromBitsInit - Extracts a value at most 8 bits in width from a BitsInit.
/// Useful for switch statements and the like.
///
/// @param init - A reference to the BitsInit to be decoded.
/// @return - The field, with the first bit in the BitsInit as the lowest
/// order bit.
static uint8_t byteFromBitsInit(BitsInit &init) {
int width = init.getNumBits();
assert(width <= 8 && "Field is too large for uint8_t!");
int index;
uint8_t mask = 0x01;
uint8_t ret = 0;
for (index = 0; index < width; index++) {
if (static_cast<BitInit*>(init.getBit(index))->getValue())
ret |= mask;
mask <<= 1;
}
return ret;
}
/// byteFromRec - Extract a value at most 8 bits in width from a Record given the
/// name of the field.
///
/// @param rec - The record from which to extract the value.
/// @param name - The name of the field in the record.
/// @return - The field, as translated by byteFromBitsInit().
static uint8_t byteFromRec(const Record* rec, const std::string &name) {
BitsInit* bits = rec->getValueAsBitsInit(name);
return byteFromBitsInit(*bits);
}
RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid) {
UID = uid;
Rec = insn.TheDef;
Name = Rec->getName();
Spec = &tables.specForUID(UID);
if (!Rec->isSubClassOf("X86Inst")) {
ShouldBeEmitted = false;
return;
}
OpPrefix = byteFromRec(Rec, "OpPrefixBits");
OpMap = byteFromRec(Rec, "OpMapBits");
Opcode = byteFromRec(Rec, "Opcode");
Form = byteFromRec(Rec, "FormBits");
Encoding = byteFromRec(Rec, "OpEncBits");
OpSize = byteFromRec(Rec, "OpSizeBits");
AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
HasVEX_4VOp3 = Rec->getValueAsBit("hasVEX_4VOp3");
HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
HasMemOp4Prefix = Rec->getValueAsBit("hasMemOp4Prefix");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
HasEVEX_KZ = Rec->getValueAsBit("hasEVEX_Z");
HasEVEX_B = Rec->getValueAsBit("hasEVEX_B");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
ForceDisassemble = Rec->getValueAsBit("ForceDisassemble");
CD8_Scale = byteFromRec(Rec, "CD8_Scale");
Name = Rec->getName();
AsmString = Rec->getValueAsString("AsmString");
Operands = &insn.Operands.OperandList;
HasVEX_LPrefix = Rec->getValueAsBit("hasVEX_L");
// Check for 64-bit inst which does not require REX
Is32Bit = false;
Is64Bit = false;
// FIXME: Is there some better way to check for In64BitMode?
std::vector<Record*> Predicates = Rec->getValueAsListOfDefs("Predicates");
for (unsigned i = 0, e = Predicates.size(); i != e; ++i) {
if (Predicates[i]->getName().find("Not64Bit") != Name.npos ||
Predicates[i]->getName().find("In32Bit") != Name.npos) {
Is32Bit = true;
break;
}
if (Predicates[i]->getName().find("In64Bit") != Name.npos) {
Is64Bit = true;
break;
}
}
if (Form == X86Local::Pseudo || (IsCodeGenOnly && !ForceDisassemble)) {
ShouldBeEmitted = false;
return;
}
// Special case since there is no attribute class for 64-bit and VEX
if (Name == "VMASKMOVDQU64") {
ShouldBeEmitted = false;
return;
}
ShouldBeEmitted = true;
}
void RecognizableInstr::processInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid)
{
// Ignore "asm parser only" instructions.
if (insn.TheDef->getValueAsBit("isAsmParserOnly"))
return;
RecognizableInstr recogInstr(tables, insn, uid);
if (recogInstr.shouldBeEmitted()) {
recogInstr.emitInstructionSpecifier();
recogInstr.emitDecodePath(tables);
}
}
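// EVEX_KB(n) selects the masked (_K), zero-masked (_KZ), broadcast (_B), or
// combined variant of an EVEX instruction context, based on the instruction's
// hasEVEX_K, hasEVEX_Z, and hasEVEX_B bits.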
#define EVEX_KB(n) (HasEVEX_KZ && HasEVEX_B ? n##_KZ_B : \
(HasEVEX_K && HasEVEX_B ? n##_K_B : \
(HasEVEX_KZ ? n##_KZ : \
(HasEVEX_K? n##_K : (HasEVEX_B ? n##_B : n)))))
InstructionContext RecognizableInstr::insnContext() const {
InstructionContext insnContext;
if (Encoding == X86Local::EVEX) {
if (HasVEX_LPrefix && HasEVEX_L2Prefix) {
errs() << "Don't support VEX.L if EVEX_L2 is enabled: " << Name << "\n";
llvm_unreachable("Don't support VEX.L if EVEX_L2 is enabled");
}
// VEX_L & VEX_W
if (HasVEX_LPrefix && HasVEX_WPrefix) {
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (HasVEX_LPrefix) {
// VEX_L
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
else if (HasEVEX_L2Prefix && HasVEX_WPrefix) {
// EVEX_L2 & VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (HasEVEX_L2Prefix) {
// EVEX_L2
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_L2_OPSIZE);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_XS);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
else if (HasVEX_WPrefix) {
// VEX_W
if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_W_XD);
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
// No L, no W
else if (OpPrefix == X86Local::PD)
insnContext = EVEX_KB(IC_EVEX_OPSIZE);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_XS);
else
insnContext = EVEX_KB(IC_EVEX);
/// eof EVEX
} else if (Encoding == X86Local::VEX || Encoding == X86Local::XOP) {
if (HasVEX_LPrefix && HasVEX_WPrefix) {
if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_L_W_OPSIZE;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_L_W_XS;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_W_XD;
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX_L_W;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (OpPrefix == X86Local::PD && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
else if (OpPrefix == X86Local::PD && HasVEX_WPrefix)
insnContext = IC_VEX_W_OPSIZE;
else if (OpPrefix == X86Local::PD)
insnContext = IC_VEX_OPSIZE;
else if (HasVEX_LPrefix && OpPrefix == X86Local::XS)
insnContext = IC_VEX_L_XS;
else if (HasVEX_LPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_XD;
else if (HasVEX_WPrefix && OpPrefix == X86Local::XS)
insnContext = IC_VEX_W_XS;
else if (HasVEX_WPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_W_XD;
else if (HasVEX_WPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_W;
else if (HasVEX_LPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_L;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_XS;
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (Is64Bit || HasREX_WPrefix || AdSize == X86Local::AdSize64) {
if (HasREX_WPrefix && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
insnContext = IC_64BIT_REXW_OPSIZE;
else if (HasREX_WPrefix && AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_REXW_ADSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_64BIT_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_XS_OPSIZE;
else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_64BIT_OPSIZE;
else if (AdSize == X86Local::AdSize32)
insnContext = IC_64BIT_ADSIZE;
else if (HasREX_WPrefix && OpPrefix == X86Local::XS)
insnContext = IC_64BIT_REXW_XS;
else if (HasREX_WPrefix && OpPrefix == X86Local::XD)
insnContext = IC_64BIT_REXW_XD;
else if (OpPrefix == X86Local::XD)
insnContext = IC_64BIT_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_64BIT_XS;
else if (HasREX_WPrefix)
insnContext = IC_64BIT_REXW;
else
insnContext = IC_64BIT;
} else {
if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
insnContext = IC_XD_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
insnContext = IC_XS_OPSIZE;
else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize16)
insnContext = IC_OPSIZE_ADSIZE;
else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
insnContext = IC_OPSIZE;
else if (AdSize == X86Local::AdSize16)
insnContext = IC_ADSIZE;
else if (OpPrefix == X86Local::XD)
insnContext = IC_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_XS;
else
insnContext = IC;
}
return insnContext;
}
void RecognizableInstr::adjustOperandEncoding(OperandEncoding &encoding) {
// The scaling factor for AVX512 compressed displacement encoding is an
// instruction attribute. Adjust the ModRM encoding type to include the
// scale for compressed displacement.
if (encoding != ENCODING_RM || CD8_Scale == 0)
return;
encoding = (OperandEncoding)(encoding + Log2_32(CD8_Scale));
assert(encoding <= ENCODING_RM_CD64 && "Invalid CDisp scaling");
}
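// handleOperand - Translate one operand from its TableGen operand class name
// into the encoding and type enums used by the disassembler tables. Tied
// duplicates recorded in operandMapping are first marked as ENCODING_DUP.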
void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned &numPhysicalOperands,
const unsigned *operandMapping,
OperandEncoding (*encodingFromString)
(const std::string&,
uint8_t OpSize)) {
if (optional) {
if (physicalOperandIndex >= numPhysicalOperands)
return;
} else {
assert(physicalOperandIndex < numPhysicalOperands);
}
while (operandMapping[operandIndex] != operandIndex) {
Spec->operands[operandIndex].encoding = ENCODING_DUP;
Spec->operands[operandIndex].type =
(OperandType)(TYPE_DUP0 + operandMapping[operandIndex]);
++operandIndex;
}
const std::string &typeName = (*Operands)[operandIndex].Rec->getName();
OperandEncoding encoding = encodingFromString(typeName, OpSize);
// Adjust the encoding type for an operand based on the instruction.
adjustOperandEncoding(encoding);
Spec->operands[operandIndex].encoding = encoding;
Spec->operands[operandIndex].type = typeFromString(typeName,
HasREX_WPrefix, OpSize);
++operandIndex;
++physicalOperandIndex;
}
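// emitInstructionSpecifier - Populate this instruction's specifier with its
// name, instruction context, and a per-operand encoding/type description,
// dispatching on the instruction form.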
void RecognizableInstr::emitInstructionSpecifier() {
Spec->name = Name;
Spec->insnContext = insnContext();
const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
unsigned numOperands = OperandList.size();
unsigned numPhysicalOperands = 0;
// operandMapping maps from operands in OperandList to their originals.
// If operandMapping[i] != i, then the entry is a duplicate.
unsigned operandMapping[X86_MAX_OPERANDS];
assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
if (!OperandList[operandIndex].Constraints.empty()) {
const CGIOperandList::ConstraintInfo &Constraint =
OperandList[operandIndex].Constraints[0];
if (Constraint.isTied()) {
operandMapping[operandIndex] = operandIndex;
operandMapping[Constraint.getTiedOperand()] = operandIndex;
} else {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
}
} else {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
}
}
#define HANDLE_OPERAND(class) \
handleOperand(false, \
operandIndex, \
physicalOperandIndex, \
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
#define HANDLE_OPTIONAL(class) \
handleOperand(true, \
operandIndex, \
physicalOperandIndex, \
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
// operandIndex should always be < numOperands
unsigned operandIndex = 0;
// physicalOperandIndex should always be < numPhysicalOperands
unsigned physicalOperandIndex = 0;
// Given the set of prefix bits, how many additional operands does the
// instruction have?
unsigned additionalOperands = 0;
if (HasVEX_4V || HasVEX_4VOp3)
++additionalOperands;
if (HasEVEX_K)
++additionalOperands;
switch (Form) {
default: llvm_unreachable("Unhandled form");
case X86Local::RawFrmSrc:
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrmDst:
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrmDstSrc:
HANDLE_OPERAND(relocation);
HANDLE_OPERAND(relocation);
return;
case X86Local::RawFrm:
// Operand 1 (optional) is an address or immediate.
// Operand 2 (optional) is an immediate.
assert(numPhysicalOperands <= 2 &&
"Unexpected number of operands for RawFrm");
HANDLE_OPTIONAL(relocation)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::RawFrmMemOffs:
// Operand 1 is an address.
HANDLE_OPERAND(relocation);
break;
case X86Local::AddRegFrm:
// Operand 1 is added to the opcode.
// Operand 2 (optional) is an address.
assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
"Unexpected number of operands for AddRegFrm");
HANDLE_OPERAND(opcodeModifier)
HANDLE_OPTIONAL(relocation)
break;
case X86Local::MRMDestReg:
// Operand 1 is a register operand in the R/M field.
// - In AVX512 there may be a mask operand here -
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMDestRegFrm");
HANDLE_OPERAND(rmRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMDestMem:
// Operand 1 is a memory operand (possibly SIB-extended)
// Operand 2 is a register operand in the Reg/Opcode field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMDestMemFrm with VEX_4V");
HANDLE_OPERAND(memory)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcReg:
// Operand 1 is a register operand in the Reg/Opcode field.
// Operand 2 is a register operand in the R/M field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
// Operand 4 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 4 + additionalOperands &&
"Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
if (HasMemOp4Prefix)
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(rmRegister)
if (HasVEX_4VOp3)
HANDLE_OPERAND(vvvvRegister)
if (!HasMemOp4Prefix)
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
// Operand 2 is a memory operand (possibly SIB-extended)
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 2 + additionalOperands &&
numPhysicalOperands <= 4 + additionalOperands &&
"Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
if (HasVEX_4V)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
if (HasMemOp4Prefix)
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(memory)
if (HasVEX_4VOp3)
HANDLE_OPERAND(vvvvRegister)
if (!HasMemOp4Prefix)
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
case X86Local::MRMXr:
case X86Local::MRM0r:
case X86Local::MRM1r:
case X86Local::MRM2r:
case X86Local::MRM3r:
case X86Local::MRM4r:
case X86Local::MRM5r:
case X86Local::MRM6r:
case X86Local::MRM7r:
// Operand 1 is a register operand in the R/M field.
// Operand 2 (optional) is an immediate or relocation.
// Operand 3 (optional) is an immediate.
assert(numPhysicalOperands >= 0 + additionalOperands &&
numPhysicalOperands <= 3 + additionalOperands &&
"Unexpected number of operands for MRMnr");
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
HANDLE_OPTIONAL(rmRegister)
HANDLE_OPTIONAL(relocation)
HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMXm:
case X86Local::MRM0m:
case X86Local::MRM1m:
case X86Local::MRM2m:
case X86Local::MRM3m:
case X86Local::MRM4m:
case X86Local::MRM5m:
case X86Local::MRM6m:
case X86Local::MRM7m:
// Operand 1 is a memory operand (possibly SIB-extended)
// Operand 2 (optional) is an immediate or relocation.
assert(numPhysicalOperands >= 1 + additionalOperands &&
numPhysicalOperands <= 2 + additionalOperands &&
"Unexpected number of operands for MRMnm");
if (HasVEX_4V)
HANDLE_OPERAND(vvvvRegister)
if (HasEVEX_K)
HANDLE_OPERAND(writemaskRegister)
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(relocation)
break;
case X86Local::RawFrmImm8:
// operand 1 is a 16-bit immediate
// operand 2 is an 8-bit immediate
assert(numPhysicalOperands == 2 &&
"Unexpected number of operands for X86Local::RawFrmImm8");
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(immediate)
break;
case X86Local::RawFrmImm16:
// operand 1 is a 16-bit immediate
// operand 2 is a 16-bit immediate
HANDLE_OPERAND(immediate)
HANDLE_OPERAND(immediate)
break;
case X86Local::MRM_F8:
if (Opcode == 0xc6) {
assert(numPhysicalOperands == 1 &&
"Unexpected number of operands for X86Local::MRM_F8");
HANDLE_OPERAND(immediate)
} else if (Opcode == 0xc7) {
assert(numPhysicalOperands == 1 &&
"Unexpected number of operands for X86Local::MRM_F8");
HANDLE_OPERAND(relocation)
}
break;
case X86Local::MRM_C0: case X86Local::MRM_C1: case X86Local::MRM_C2:
case X86Local::MRM_C3: case X86Local::MRM_C4: case X86Local::MRM_C8:
case X86Local::MRM_C9: case X86Local::MRM_CA: case X86Local::MRM_CB:
case X86Local::MRM_CF: case X86Local::MRM_D0: case X86Local::MRM_D1:
case X86Local::MRM_D4: case X86Local::MRM_D5: case X86Local::MRM_D6:
case X86Local::MRM_D7: case X86Local::MRM_D8: case X86Local::MRM_D9:
case X86Local::MRM_DA: case X86Local::MRM_DB: case X86Local::MRM_DC:
case X86Local::MRM_DD: case X86Local::MRM_DE: case X86Local::MRM_DF:
case X86Local::MRM_E0: case X86Local::MRM_E1: case X86Local::MRM_E2:
case X86Local::MRM_E3: case X86Local::MRM_E4: case X86Local::MRM_E5:
case X86Local::MRM_E8: case X86Local::MRM_E9: case X86Local::MRM_EA:
case X86Local::MRM_EB: case X86Local::MRM_EC: case X86Local::MRM_ED:
case X86Local::MRM_EE: case X86Local::MRM_F0: case X86Local::MRM_F1:
case X86Local::MRM_F2: case X86Local::MRM_F3: case X86Local::MRM_F4:
case X86Local::MRM_F5: case X86Local::MRM_F6: case X86Local::MRM_F7:
case X86Local::MRM_F9: case X86Local::MRM_FA: case X86Local::MRM_FB:
case X86Local::MRM_FC: case X86Local::MRM_FD: case X86Local::MRM_FE:
case X86Local::MRM_FF:
// Ignored.
break;
}
#undef HANDLE_OPERAND
#undef HANDLE_OPTIONAL
}
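// emitDecodePath - Choose the opcode table, opcode value, and ModR/M filter
// for this instruction and register it with the disassembler tables.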
void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
// Special cases where the LLVM tables are not complete
#define MAP(from, to) \
case X86Local::MRM_##from:
OpcodeType opcodeType = (OpcodeType)-1;
ModRMFilter* filter = nullptr;
uint8_t opcodeToSet = 0;
switch (OpMap) {
default: llvm_unreachable("Invalid map!");
case X86Local::OB:
case X86Local::TB:
case X86Local::T8:
case X86Local::TA:
case X86Local::XOP8:
case X86Local::XOP9:
case X86Local::XOPA:
switch (OpMap) {
default: llvm_unreachable("Unexpected map!");
case X86Local::OB: opcodeType = ONEBYTE; break;
case X86Local::TB: opcodeType = TWOBYTE; break;
case X86Local::T8: opcodeType = THREEBYTE_38; break;
case X86Local::TA: opcodeType = THREEBYTE_3A; break;
case X86Local::XOP8: opcodeType = XOP8_MAP; break;
case X86Local::XOP9: opcodeType = XOP9_MAP; break;
case X86Local::XOPA: opcodeType = XOPA_MAP; break;
}
switch (Form) {
default:
filter = new DumbFilter();
break;
case X86Local::MRMDestReg: case X86Local::MRMDestMem:
case X86Local::MRMSrcReg: case X86Local::MRMSrcMem:
case X86Local::MRMXr: case X86Local::MRMXm:
filter = new ModFilter(isRegFormat(Form));
break;
case X86Local::MRM0r: case X86Local::MRM1r:
case X86Local::MRM2r: case X86Local::MRM3r:
case X86Local::MRM4r: case X86Local::MRM5r:
case X86Local::MRM6r: case X86Local::MRM7r:
filter = new ExtendedFilter(true, Form - X86Local::MRM0r);
break;
case X86Local::MRM0m: case X86Local::MRM1m:
case X86Local::MRM2m: case X86Local::MRM3m:
case X86Local::MRM4m: case X86Local::MRM5m:
case X86Local::MRM6m: case X86Local::MRM7m:
filter = new ExtendedFilter(false, Form - X86Local::MRM0m);
break;
MRM_MAPPING
filter = new ExactFilter(0xC0 + Form - X86Local::MRM_C0); \
break;
} // switch (Form)
opcodeToSet = Opcode;
break;
} // switch (OpMap)
unsigned AddressSize = 0;
switch (AdSize) {
case X86Local::AdSize16: AddressSize = 16; break;
case X86Local::AdSize32: AddressSize = 32; break;
case X86Local::AdSize64: AddressSize = 64; break;
}
assert(opcodeType != (OpcodeType)-1 &&
"Opcode type not set");
assert(filter && "Filter not set");
if (Form == X86Local::AddRegFrm) {
assert(((opcodeToSet & 7) == 0) &&
"ADDREG_FRM opcode not aligned");
uint8_t currentOpcode;
for (currentOpcode = opcodeToSet;
currentOpcode < opcodeToSet + 8;
++currentOpcode)
tables.setTableFields(opcodeType,
insnContext(),
currentOpcode,
*filter,
UID, Is32Bit, IgnoresVEX_L, AddressSize);
} else {
tables.setTableFields(opcodeType,
insnContext(),
opcodeToSet,
*filter,
UID, Is32Bit, IgnoresVEX_L, AddressSize);
}
delete filter;
#undef MAP
}
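// typeFromString - Map a TableGen operand class name to the disassembler's
// TYPE_* operand type, taking the REX.W and operand-size attributes into
// account.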
#define TYPE(str, type) if (s == str) return type;
OperandType RecognizableInstr::typeFromString(const std::string &s,
bool hasREX_WPrefix,
uint8_t OpSize) {
if(hasREX_WPrefix) {
// For instructions with a REX_W prefix, a declared 32-bit register encoding
// is special.
TYPE("GR32", TYPE_R32)
}
if(OpSize == X86Local::OpSize16) {
// For OpSize16 instructions, a declared 16-bit register or
// immediate encoding is special.
TYPE("GR16", TYPE_Rv)
TYPE("i16imm", TYPE_IMMv)
} else if(OpSize == X86Local::OpSize32) {
// For OpSize32 instructions, a declared 32-bit register or
// immediate encoding is special.
TYPE("GR32", TYPE_Rv)
}
TYPE("i16mem", TYPE_Mv)
TYPE("i16imm", TYPE_IMM16)
TYPE("i16i8imm", TYPE_IMMv)
TYPE("GR16", TYPE_R16)
TYPE("i32mem", TYPE_Mv)
TYPE("i32imm", TYPE_IMMv)
TYPE("i32i8imm", TYPE_IMM32)
TYPE("GR32", TYPE_R32)
TYPE("GR32orGR64", TYPE_R32)
TYPE("i64mem", TYPE_Mv)
TYPE("i64i32imm", TYPE_IMM64)
TYPE("i64i8imm", TYPE_IMM64)
TYPE("GR64", TYPE_R64)
TYPE("i8mem", TYPE_M8)
TYPE("i8imm", TYPE_IMM8)
TYPE("u8imm", TYPE_UIMM8)
TYPE("i32u8imm", TYPE_UIMM8)
TYPE("GR8", TYPE_R8)
TYPE("VR128", TYPE_XMM128)
TYPE("VR128X", TYPE_XMM128)
TYPE("f128mem", TYPE_M128)
TYPE("f256mem", TYPE_M256)
TYPE("f512mem", TYPE_M512)
TYPE("FR64", TYPE_XMM64)
TYPE("FR64X", TYPE_XMM64)
TYPE("f64mem", TYPE_M64FP)
TYPE("sdmem", TYPE_M64FP)
TYPE("FR32", TYPE_XMM32)
TYPE("FR32X", TYPE_XMM32)
TYPE("f32mem", TYPE_M32FP)
TYPE("ssmem", TYPE_M32FP)
TYPE("RST", TYPE_ST)
TYPE("i128mem", TYPE_M128)
TYPE("i256mem", TYPE_M256)
TYPE("i512mem", TYPE_M512)
TYPE("i64i32imm_pcrel", TYPE_REL64)
TYPE("i16imm_pcrel", TYPE_REL16)
TYPE("i32imm_pcrel", TYPE_REL32)
TYPE("SSECC", TYPE_IMM3)
TYPE("XOPCC", TYPE_IMM3)
TYPE("AVXCC", TYPE_IMM5)
TYPE("AVX512ICC", TYPE_AVX512ICC)
TYPE("AVX512RC", TYPE_IMM32)
TYPE("brtarget32", TYPE_RELv)
TYPE("brtarget16", TYPE_RELv)
TYPE("brtarget8", TYPE_REL8)
TYPE("f80mem", TYPE_M80FP)
TYPE("lea64_32mem", TYPE_LEA)
TYPE("lea64mem", TYPE_LEA)
TYPE("VR64", TYPE_MM64)
TYPE("i64imm", TYPE_IMMv)
TYPE("anymem", TYPE_M)
TYPE("opaque32mem", TYPE_M1616)
TYPE("opaque48mem", TYPE_M1632)
TYPE("opaque80mem", TYPE_M1664)
TYPE("opaque512mem", TYPE_M512)
TYPE("SEGMENT_REG", TYPE_SEGMENTREG)
TYPE("DEBUG_REG", TYPE_DEBUGREG)
TYPE("CONTROL_REG", TYPE_CONTROLREG)
TYPE("srcidx8", TYPE_SRCIDX8)
TYPE("srcidx16", TYPE_SRCIDX16)
TYPE("srcidx32", TYPE_SRCIDX32)
TYPE("srcidx64", TYPE_SRCIDX64)
TYPE("dstidx8", TYPE_DSTIDX8)
TYPE("dstidx16", TYPE_DSTIDX16)
TYPE("dstidx32", TYPE_DSTIDX32)
TYPE("dstidx64", TYPE_DSTIDX64)
TYPE("offset16_8", TYPE_MOFFS8)
TYPE("offset16_16", TYPE_MOFFS16)
TYPE("offset16_32", TYPE_MOFFS32)
TYPE("offset32_8", TYPE_MOFFS8)
TYPE("offset32_16", TYPE_MOFFS16)
TYPE("offset32_32", TYPE_MOFFS32)
TYPE("offset32_64", TYPE_MOFFS64)
TYPE("offset64_8", TYPE_MOFFS8)
TYPE("offset64_16", TYPE_MOFFS16)
TYPE("offset64_32", TYPE_MOFFS32)
TYPE("offset64_64", TYPE_MOFFS64)
TYPE("VR256", TYPE_XMM256)
TYPE("VR256X", TYPE_XMM256)
TYPE("VR512", TYPE_XMM512)
TYPE("VK1", TYPE_VK1)
TYPE("VK1WM", TYPE_VK1)
TYPE("VK2", TYPE_VK2)
TYPE("VK2WM", TYPE_VK2)
TYPE("VK4", TYPE_VK4)
TYPE("VK4WM", TYPE_VK4)
TYPE("VK8", TYPE_VK8)
TYPE("VK8WM", TYPE_VK8)
TYPE("VK16", TYPE_VK16)
TYPE("VK16WM", TYPE_VK16)
TYPE("VK32", TYPE_VK32)
TYPE("VK32WM", TYPE_VK32)
TYPE("VK64", TYPE_VK64)
TYPE("VK64WM", TYPE_VK64)
TYPE("GR16_NOAX", TYPE_Rv)
TYPE("GR32_NOAX", TYPE_Rv)
TYPE("GR64_NOAX", TYPE_R64)
TYPE("vx32mem", TYPE_M32)
TYPE("vx32xmem", TYPE_M32)
TYPE("vy32mem", TYPE_M32)
TYPE("vy32xmem", TYPE_M32)
TYPE("vz32mem", TYPE_M32)
TYPE("vx64mem", TYPE_M64)
TYPE("vx64xmem", TYPE_M64)
TYPE("vy64mem", TYPE_M64)
TYPE("vy64xmem", TYPE_M64)
TYPE("vz64mem", TYPE_M64)
TYPE("BNDR", TYPE_BNDR)
errs() << "Unhandled type string " << s << "\n";
llvm_unreachable("Unhandled type string");
}
#undef TYPE
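// The *EncodingFromString helpers below map TableGen operand class names to
// ENCODING_* values for each operand role: immediates, ModR/M register and
// memory fields, VEX.vvvv, writemask registers, memory operands, relocations,
// and opcode modifiers.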
#define ENCODING(str, encoding) if (s == str) return encoding;
OperandEncoding
RecognizableInstr::immediateEncodingFromString(const std::string &s,
uint8_t OpSize) {
if(OpSize != X86Local::OpSize16) {
// For instructions without an OpSize prefix, a declared 16-bit register or
// immediate encoding is special.
ENCODING("i16imm", ENCODING_IW)
}
ENCODING("i32i8imm", ENCODING_IB)
ENCODING("SSECC", ENCODING_IB)
ENCODING("XOPCC", ENCODING_IB)
ENCODING("AVXCC", ENCODING_IB)
ENCODING("AVX512ICC", ENCODING_IB)
ENCODING("AVX512RC", ENCODING_IB)
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i32imm", ENCODING_Iv)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("u8imm", ENCODING_IB)
ENCODING("i32u8imm", ENCODING_IB)
// This is not a typo. Instructions like BLENDVPD put
// register IDs in 8-bit immediates nowadays.
ENCODING("FR32", ENCODING_IB)
ENCODING("FR64", ENCODING_IB)
ENCODING("VR128", ENCODING_IB)
ENCODING("VR256", ENCODING_IB)
ENCODING("FR32X", ENCODING_IB)
ENCODING("FR64X", ENCODING_IB)
ENCODING("VR128X", ENCODING_IB)
ENCODING("VR256X", ENCODING_IB)
ENCODING("VR512", ENCODING_IB)
errs() << "Unhandled immediate encoding " << s << "\n";
llvm_unreachable("Unhandled immediate encoding");
}
OperandEncoding
RecognizableInstr::rmRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("RST", ENCODING_FP)
ENCODING("GR16", ENCODING_RM)
ENCODING("GR32", ENCODING_RM)
ENCODING("GR32orGR64", ENCODING_RM)
ENCODING("GR64", ENCODING_RM)
ENCODING("GR8", ENCODING_RM)
ENCODING("VR128", ENCODING_RM)
ENCODING("VR128X", ENCODING_RM)
ENCODING("FR64", ENCODING_RM)
ENCODING("FR32", ENCODING_RM)
ENCODING("FR64X", ENCODING_RM)
ENCODING("FR32X", ENCODING_RM)
ENCODING("VR64", ENCODING_RM)
ENCODING("VR256", ENCODING_RM)
ENCODING("VR256X", ENCODING_RM)
ENCODING("VR512", ENCODING_RM)
ENCODING("VK1", ENCODING_RM)
ENCODING("VK2", ENCODING_RM)
ENCODING("VK4", ENCODING_RM)
ENCODING("VK8", ENCODING_RM)
ENCODING("VK16", ENCODING_RM)
ENCODING("VK32", ENCODING_RM)
ENCODING("VK64", ENCODING_RM)
ENCODING("BNDR", ENCODING_RM)
errs() << "Unhandled R/M register encoding " << s << "\n";
llvm_unreachable("Unhandled R/M register encoding");
}
OperandEncoding
RecognizableInstr::roRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR16", ENCODING_REG)
ENCODING("GR32", ENCODING_REG)
ENCODING("GR32orGR64", ENCODING_REG)
ENCODING("GR64", ENCODING_REG)
ENCODING("GR8", ENCODING_REG)
ENCODING("VR128", ENCODING_REG)
ENCODING("FR64", ENCODING_REG)
ENCODING("FR32", ENCODING_REG)
ENCODING("VR64", ENCODING_REG)
ENCODING("SEGMENT_REG", ENCODING_REG)
ENCODING("DEBUG_REG", ENCODING_REG)
ENCODING("CONTROL_REG", ENCODING_REG)
ENCODING("VR256", ENCODING_REG)
ENCODING("VR256X", ENCODING_REG)
ENCODING("VR128X", ENCODING_REG)
ENCODING("FR64X", ENCODING_REG)
ENCODING("FR32X", ENCODING_REG)
ENCODING("VR512", ENCODING_REG)
ENCODING("VK1", ENCODING_REG)
ENCODING("VK2", ENCODING_REG)
ENCODING("VK4", ENCODING_REG)
ENCODING("VK8", ENCODING_REG)
ENCODING("VK16", ENCODING_REG)
ENCODING("VK32", ENCODING_REG)
ENCODING("VK64", ENCODING_REG)
ENCODING("VK1WM", ENCODING_REG)
ENCODING("VK2WM", ENCODING_REG)
ENCODING("VK4WM", ENCODING_REG)
ENCODING("VK8WM", ENCODING_REG)
ENCODING("VK16WM", ENCODING_REG)
ENCODING("VK32WM", ENCODING_REG)
ENCODING("VK64WM", ENCODING_REG)
ENCODING("BNDR", ENCODING_REG)
errs() << "Unhandled reg/opcode register encoding " << s << "\n";
llvm_unreachable("Unhandled reg/opcode register encoding");
}
OperandEncoding
RecognizableInstr::vvvvRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR32", ENCODING_VVVV)
ENCODING("GR64", ENCODING_VVVV)
ENCODING("FR32", ENCODING_VVVV)
ENCODING("FR64", ENCODING_VVVV)
ENCODING("VR128", ENCODING_VVVV)
ENCODING("VR256", ENCODING_VVVV)
ENCODING("FR32X", ENCODING_VVVV)
ENCODING("FR64X", ENCODING_VVVV)
ENCODING("VR128X", ENCODING_VVVV)
ENCODING("VR256X", ENCODING_VVVV)
ENCODING("VR512", ENCODING_VVVV)
ENCODING("VK1", ENCODING_VVVV)
ENCODING("VK2", ENCODING_VVVV)
ENCODING("VK4", ENCODING_VVVV)
ENCODING("VK8", ENCODING_VVVV)
ENCODING("VK16", ENCODING_VVVV)
ENCODING("VK32", ENCODING_VVVV)
ENCODING("VK64", ENCODING_VVVV)
errs() << "Unhandled VEX.vvvv register encoding " << s << "\n";
llvm_unreachable("Unhandled VEX.vvvv register encoding");
}
OperandEncoding
RecognizableInstr::writemaskRegisterEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("VK1WM", ENCODING_WRITEMASK)
ENCODING("VK2WM", ENCODING_WRITEMASK)
ENCODING("VK4WM", ENCODING_WRITEMASK)
ENCODING("VK8WM", ENCODING_WRITEMASK)
ENCODING("VK16WM", ENCODING_WRITEMASK)
ENCODING("VK32WM", ENCODING_WRITEMASK)
ENCODING("VK64WM", ENCODING_WRITEMASK)
errs() << "Unhandled mask register encoding " << s << "\n";
llvm_unreachable("Unhandled mask register encoding");
}
OperandEncoding
RecognizableInstr::memoryEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("i16mem", ENCODING_RM)
ENCODING("i32mem", ENCODING_RM)
ENCODING("i64mem", ENCODING_RM)
ENCODING("i8mem", ENCODING_RM)
ENCODING("ssmem", ENCODING_RM)
ENCODING("sdmem", ENCODING_RM)
ENCODING("f128mem", ENCODING_RM)
ENCODING("f256mem", ENCODING_RM)
ENCODING("f512mem", ENCODING_RM)
ENCODING("f64mem", ENCODING_RM)
ENCODING("f32mem", ENCODING_RM)
ENCODING("i128mem", ENCODING_RM)
ENCODING("i256mem", ENCODING_RM)
ENCODING("i512mem", ENCODING_RM)
ENCODING("f80mem", ENCODING_RM)
ENCODING("lea64_32mem", ENCODING_RM)
ENCODING("lea64mem", ENCODING_RM)
ENCODING("anymem", ENCODING_RM)
ENCODING("opaque32mem", ENCODING_RM)
ENCODING("opaque48mem", ENCODING_RM)
ENCODING("opaque80mem", ENCODING_RM)
ENCODING("opaque512mem", ENCODING_RM)
ENCODING("vx32mem", ENCODING_RM)
ENCODING("vx32xmem", ENCODING_RM)
ENCODING("vy32mem", ENCODING_RM)
ENCODING("vy32xmem", ENCODING_RM)
ENCODING("vz32mem", ENCODING_RM)
ENCODING("vx64mem", ENCODING_RM)
ENCODING("vx64xmem", ENCODING_RM)
ENCODING("vy64mem", ENCODING_RM)
ENCODING("vy64xmem", ENCODING_RM)
ENCODING("vz64mem", ENCODING_RM)
errs() << "Unhandled memory encoding " << s << "\n";
llvm_unreachable("Unhandled memory encoding");
}
OperandEncoding
RecognizableInstr::relocationEncodingFromString(const std::string &s,
uint8_t OpSize) {
if(OpSize != X86Local::OpSize16) {
// For instructions without an OpSize prefix, a declared 16-bit register or
// immediate encoding is special.
ENCODING("i16imm", ENCODING_IW)
}
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i32imm", ENCODING_Iv)
ENCODING("i32i8imm", ENCODING_IB)
ENCODING("i64i32imm", ENCODING_ID)
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("u8imm", ENCODING_IB)
ENCODING("i32u8imm", ENCODING_IB)
ENCODING("i64i32imm_pcrel", ENCODING_ID)
ENCODING("i16imm_pcrel", ENCODING_IW)
ENCODING("i32imm_pcrel", ENCODING_ID)
ENCODING("brtarget32", ENCODING_Iv)
ENCODING("brtarget16", ENCODING_Iv)
ENCODING("brtarget8", ENCODING_IB)
ENCODING("i64imm", ENCODING_IO)
ENCODING("offset16_8", ENCODING_Ia)
ENCODING("offset16_16", ENCODING_Ia)
ENCODING("offset16_32", ENCODING_Ia)
ENCODING("offset32_8", ENCODING_Ia)
ENCODING("offset32_16", ENCODING_Ia)
ENCODING("offset32_32", ENCODING_Ia)
ENCODING("offset32_64", ENCODING_Ia)
ENCODING("offset64_8", ENCODING_Ia)
ENCODING("offset64_16", ENCODING_Ia)
ENCODING("offset64_32", ENCODING_Ia)
ENCODING("offset64_64", ENCODING_Ia)
ENCODING("srcidx8", ENCODING_SI)
ENCODING("srcidx16", ENCODING_SI)
ENCODING("srcidx32", ENCODING_SI)
ENCODING("srcidx64", ENCODING_SI)
ENCODING("dstidx8", ENCODING_DI)
ENCODING("dstidx16", ENCODING_DI)
ENCODING("dstidx32", ENCODING_DI)
ENCODING("dstidx64", ENCODING_DI)
errs() << "Unhandled relocation encoding " << s << "\n";
llvm_unreachable("Unhandled relocation encoding");
}
OperandEncoding
RecognizableInstr::opcodeModifierEncodingFromString(const std::string &s,
uint8_t OpSize) {
ENCODING("GR32", ENCODING_Rv)
ENCODING("GR64", ENCODING_RO)
ENCODING("GR16", ENCODING_Rv)
ENCODING("GR8", ENCODING_RB)
ENCODING("GR16_NOAX", ENCODING_Rv)
ENCODING("GR32_NOAX", ENCODING_Rv)
ENCODING("GR64_NOAX", ENCODING_RO)
errs() << "Unhandled opcode modifier encoding " << s << "\n";
llvm_unreachable("Unhandled opcode modifier encoding");
}
#undef ENCODING
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/DisassemblerEmitter.cpp | //===- DisassemblerEmitter.cpp - Generate a disassembler ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "CodeGenTarget.h"
// #include "X86DisassemblerTables.h" // HLSL Change
// #include "X86RecognizableInstr.h" // HLSL Change
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
// using namespace llvm::X86Disassembler; // HLSL Change
/// DisassemblerEmitter - Contains disassembler table emitters for various
/// architectures.
/// X86 Disassembler Emitter
///
/// *** IF YOU'RE HERE TO RESOLVE A "Primary decode conflict", LOOK DOWN NEAR
/// THE END OF THIS COMMENT!
///
/// The X86 disassembler emitter is part of the X86 Disassembler, which is
/// documented in lib/Target/X86/X86Disassembler.h.
///
/// The emitter produces the tables that the disassembler uses to translate
/// instructions. The emitter generates the following tables:
///
/// - One table (CONTEXTS_SYM) that contains a mapping of attribute masks to
/// instruction contexts. Although for each attribute there are cases where
/// that attribute determines decoding, in the majority of cases decoding is
/// the same whether or not an attribute is present. For example, a 64-bit
/// instruction with an OPSIZE prefix and an XS prefix decodes the same way in
/// all cases as a 64-bit instruction with only OPSIZE set. (The XS prefix
/// may have effects on its execution, but does not change the instruction
/// returned.) This allows considerable space savings in other tables.
/// - Six tables (ONEBYTE_SYM, TWOBYTE_SYM, THREEBYTE38_SYM, THREEBYTE3A_SYM,
///   THREEBYTEA6_SYM, and THREEBYTEA7_SYM) contain the hierarchy that the
/// decoder traverses while decoding an instruction. At the lowest level of
/// this hierarchy are instruction UIDs, 16-bit integers that can be used to
/// uniquely identify the instruction and correspond exactly to its position
/// in the list of CodeGenInstructions for the target.
/// - One table (INSTRUCTIONS_SYM) contains information about the operands of
/// each instruction and how to decode them.
///
/// During table generation, there may be conflicts between instructions that
/// occupy the same space in the decode tables. These conflicts are resolved as
/// follows in setTableFields() (X86DisassemblerTables.cpp)
///
/// - If the current context is the native context for one of the instructions
/// (that is, the attributes specified for it in the LLVM tables specify
/// precisely the current context), then it has priority.
/// - If the current context isn't native for either of the instructions, then
/// the higher-priority context wins (that is, the one that is more specific).
/// That hierarchy is determined by outranks() (X86DisassemblerTables.cpp)
/// - If the current context is native for both instructions, then the table
/// emitter reports a conflict and dies.
///
/// *** RESOLUTION FOR "Primary decode conflict"S
///
/// If two instructions collide, typically the solution is (in order of
/// likelihood):
///
/// (1) to filter out one of the instructions by editing filter()
/// (X86RecognizableInstr.cpp). This is the most common resolution, but
/// check the Intel manuals first to make sure that (2) and (3) are not the
/// problem.
/// (2) to fix the tables (X86.td and its subsidiaries) so the opcodes are
/// accurate. Sometimes they are not.
/// (3) to fix the tables to reflect the actual context (for example, required
/// prefixes), and possibly to add a new context by editing
/// lib/Target/X86/X86DisassemblerDecoderCommon.h. This is unlikely to be
/// the cause.
///
/// DisassemblerEmitter.cpp contains the implementation for the emitter,
/// which simply pulls out instructions from the CodeGenTarget and pushes them
/// into X86DisassemblerTables.
/// X86DisassemblerTables.h contains the interface for the instruction tables,
/// which manage and emit the structures discussed above.
/// X86DisassemblerTables.cpp contains the implementation for the instruction
/// tables.
/// X86ModRMFilters.h contains filters that can be used to determine which
/// ModR/M values are valid for a particular instruction. These are used to
/// populate ModRMDecisions.
/// X86RecognizableInstr.h contains the interface for a single instruction,
/// which knows how to translate itself from a CodeGenInstruction and provide
/// the information necessary for integration into the tables.
/// X86RecognizableInstr.cpp contains the implementation for a single
/// instruction.
namespace llvm {
extern void EmitFixedLenDecoder(RecordKeeper &RK, raw_ostream &OS,
std::string PredicateNamespace,
std::string GPrefix,
std::string GPostfix,
std::string ROK,
std::string RFail,
std::string L);
void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) {
CodeGenTarget Target(Records);
emitSourceFileHeader(" * " + Target.getName() + " Disassembler", OS);
#if 0 // HLSL Change
// X86 uses a custom disassembler.
if (Target.getName() == "X86") {
DisassemblerTables Tables;
const std::vector<const CodeGenInstruction*> &numberedInstructions =
Target.getInstructionsByEnumValue();
for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
if (Tables.hasConflicts()) {
PrintError(Target.getTargetRecord()->getLoc(), "Primary decode conflict");
return;
}
Tables.emit(OS);
return;
}
// ARM and Thumb have a CHECK() macro to deal with DecodeStatuses.
if (Target.getName() == "ARM" || Target.getName() == "Thumb" ||
Target.getName() == "AArch64" || Target.getName() == "ARM64") {
std::string PredicateNamespace = Target.getName();
if (PredicateNamespace == "Thumb")
PredicateNamespace = "ARM";
EmitFixedLenDecoder(Records, OS, PredicateNamespace,
"if (!Check(S, ", ")) return MCDisassembler::Fail;",
"S", "MCDisassembler::Fail",
" MCDisassembler::DecodeStatus S = "
"MCDisassembler::Success;\n(void)S;");
return;
}
#endif // HLSL Change
EmitFixedLenDecoder(Records, OS, Target.getName(),
"if (", " == MCDisassembler::Fail)"
" return MCDisassembler::Fail;",
"MCDisassembler::Success", "MCDisassembler::Fail", "");
}
} // End llvm namespace
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/TableGen/TableGen.cpp | //===- TableGen.cpp - Top-Level TableGen implementation for LLVM ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the main function for LLVM's TableGen.
//
//===----------------------------------------------------------------------===//
#include "TableGenBackends.h" // Declares all backends.
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Signals.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/SetTheory.h"
// HLSL Change Starts
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#endif
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MSFileSystem.h"
// HLSL Change Ends
using namespace llvm;
enum ActionType {
PrintRecords,
GenEmitter,
GenRegisterInfo,
GenInstrInfo,
GenAsmWriter,
GenAsmMatcher,
GenDisassembler,
GenPseudoLowering,
GenCallingConv,
GenDAGISel,
GenDFAPacketizer,
GenFastISel,
GenSubtarget,
GenIntrinsic,
GenTgtIntrinsic,
PrintEnums,
PrintSets,
GenOptParserDefs,
GenCTags
};
namespace {
cl::opt<ActionType>
Action(cl::desc("Action to perform:"),
cl::values(clEnumValN(PrintRecords, "print-records",
"Print all records to stdout (default)"),
clEnumValN(GenEmitter, "gen-emitter",
"Generate machine code emitter"),
clEnumValN(GenRegisterInfo, "gen-register-info",
"Generate registers and register classes info"),
clEnumValN(GenInstrInfo, "gen-instr-info",
"Generate instruction descriptions"),
clEnumValN(GenCallingConv, "gen-callingconv",
"Generate calling convention descriptions"),
clEnumValN(GenAsmWriter, "gen-asm-writer",
"Generate assembly writer"),
clEnumValN(GenDisassembler, "gen-disassembler",
"Generate disassembler"),
clEnumValN(GenPseudoLowering, "gen-pseudo-lowering",
"Generate pseudo instruction lowering"),
clEnumValN(GenAsmMatcher, "gen-asm-matcher",
"Generate assembly instruction matcher"),
clEnumValN(GenDAGISel, "gen-dag-isel",
"Generate a DAG instruction selector"),
clEnumValN(GenDFAPacketizer, "gen-dfa-packetizer",
"Generate DFA Packetizer for VLIW targets"),
clEnumValN(GenFastISel, "gen-fast-isel",
"Generate a \"fast\" instruction selector"),
clEnumValN(GenSubtarget, "gen-subtarget",
"Generate subtarget enumerations"),
clEnumValN(GenIntrinsic, "gen-intrinsic",
"Generate intrinsic information"),
clEnumValN(GenTgtIntrinsic, "gen-tgt-intrinsic",
"Generate target intrinsic information"),
clEnumValN(PrintEnums, "print-enums",
"Print enum values for a class"),
clEnumValN(PrintSets, "print-sets",
"Print expanded sets for testing DAG exprs"),
clEnumValN(GenOptParserDefs, "gen-opt-parser-defs",
"Generate option definitions"),
clEnumValN(GenCTags, "gen-ctags",
"Generate ctags-compatible index"),
clEnumValEnd));
cl::opt<std::string>
Class("class", cl::desc("Print Enum list for this class"),
cl::value_desc("class name"));
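// LLVMTableGenMain - Dispatch to the TableGen backend selected by the Action
// command-line option.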
bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
switch (Action) {
case PrintRecords:
OS << Records; // No argument, dump all contents
break;
case GenEmitter:
EmitCodeEmitter(Records, OS);
break;
case GenRegisterInfo:
EmitRegisterInfo(Records, OS);
break;
case GenInstrInfo:
EmitInstrInfo(Records, OS);
break;
case GenCallingConv:
EmitCallingConv(Records, OS);
break;
case GenAsmWriter:
EmitAsmWriter(Records, OS);
break;
case GenAsmMatcher:
EmitAsmMatcher(Records, OS);
break;
case GenDisassembler:
EmitDisassembler(Records, OS);
break;
case GenPseudoLowering:
EmitPseudoLowering(Records, OS);
break;
case GenDAGISel:
EmitDAGISel(Records, OS);
break;
case GenDFAPacketizer:
EmitDFAPacketizer(Records, OS);
break;
case GenFastISel:
EmitFastISel(Records, OS);
break;
case GenSubtarget:
EmitSubtarget(Records, OS);
break;
case GenIntrinsic:
EmitIntrinsics(Records, OS);
break;
case GenTgtIntrinsic:
EmitIntrinsics(Records, OS, true);
break;
case GenOptParserDefs:
EmitOptParser(Records, OS);
break;
case PrintEnums:
{
for (Record *Rec : Records.getAllDerivedDefinitions(Class))
OS << Rec->getName() << ", ";
OS << "\n";
break;
}
case PrintSets:
{
SetTheory Sets;
Sets.addFieldExpander("Set", "Elements");
for (Record *Rec : Records.getAllDerivedDefinitions("Set")) {
OS << Rec->getName() << " = [";
const std::vector<Record*> *Elts = Sets.expand(Rec);
assert(Elts && "Couldn't expand Set instance");
for (Record *Elt : *Elts)
OS << ' ' << Elt->getName();
OS << " ]\n";
}
break;
}
case GenCTags:
EmitCTags(Records, OS);
break;
}
return false;
}
}
int main(int argc, char **argv) {
// HLSL Change Starts
if (std::error_code ec = llvm::sys::fs::SetupPerThreadFileSystem())
return 1;
llvm::sys::fs::AutoCleanupPerThreadFileSystem auto_cleanup_fs;
llvm::sys::fs::MSFileSystem* msfPtr;
HRESULT hr;
if (!SUCCEEDED(hr = CreateMSFileSystemForDisk(&msfPtr)))
return 1;
std::unique_ptr<llvm::sys::fs::MSFileSystem> msf(msfPtr);
llvm::sys::fs::AutoPerThreadSystem pts(msf.get());
// HLSL Change Ends
// sys::PrintStackTraceOnErrorSignal(); // HLSL Change
// PrettyStackTraceProgram X(argc, argv); // HLSL Change
cl::ParseCommandLineOptions(argc, argv);
return TableGenMain(argv[0], &LLVMTableGenMain);
}
#ifdef __has_feature
#if __has_feature(address_sanitizer)
#include <sanitizer/lsan_interface.h>
// Disable LeakSanitizer for this binary as it has too many leaks that are not
// very interesting to fix. See compiler-rt/include/sanitizer/lsan_interface.h .
int __lsan_is_turned_off() { return 1; }
#endif // __has_feature(address_sanitizer)
#endif // defined(__has_feature)
|
0 | repos/DirectXShaderCompiler/utils/textmate | repos/DirectXShaderCompiler/utils/textmate/TableGen.tmbundle/info.plist | <?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>name</key>
<string>TableGen</string>
<key>ordering</key>
<array/>
<key>uuid</key>
<string>96925448-7219-41E9-A7F0-8D5B70E9B877</string>
</dict>
</plist>
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/jedit/tablegen.xml | <?xml version="1.0"?>
<!DOCTYPE MODE SYSTEM "xmode.dtd">
<MODE>
<PROPS>
<PROPERTY NAME="lineComment" VALUE="//" />
<PROPERTY NAME="commentStart" VALUE="/*" />
<PROPERTY NAME="commentEnd" VALUE="*/" />
<PROPERTY NAME="indentOpenBrackets" VALUE="{" />
<PROPERTY NAME="indentCloseBrackets" VALUE="}" />
<PROPERTY NAME="wordBreakChars" VALUE=",+-=<>/?^&*" />
<PROPERTY NAME="unalignedOpenBrackets" VALUE="(<" />
<PROPERTY NAME="unalignedCloseBrackets" VALUE=")>" />
</PROPS>
<RULES IGNORE_CASE="FALSE" HIGHLIGHT_DIGITS="TRUE">
<EOL_SPAN TYPE="COMMENT1">//</EOL_SPAN>
<SPAN TYPE="COMMENT1">
<BEGIN>/*</BEGIN>
<END>*/</END>
</SPAN>
<SPAN TYPE="LITERAL1" NO_LINE_BREAK="TRUE" ESCAPE="\">
<BEGIN>"</BEGIN>
<END>"</END>
</SPAN>
<KEYWORDS>
<KEYWORD1>let</KEYWORD1>
<KEYWORD1>def</KEYWORD1>
<KEYWORD1>class</KEYWORD1>
<KEYWORD1>include</KEYWORD1>
<KEYWORD3>bit</KEYWORD3>
<KEYWORD3>int</KEYWORD3>
<KEYWORD3>string</KEYWORD3>
<KEYWORD3>bits</KEYWORD3>
<KEYWORD3>list</KEYWORD3>
<KEYWORD3>dag</KEYWORD3>
<KEYWORD3>code</KEYWORD3>
</KEYWORDS>
</RULES>
</MODE>
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lint/cpp_lint.py | #!/usr/bin/python
#
# Checks C++ files to make sure they conform to LLVM standards, as specified in
# http://llvm.org/docs/CodingStandards.html .
#
# TODO: add unittests for the verifier functions:
# http://docs.python.org/library/unittest.html .
import common_lint
import re
import sys
def VerifyIncludes(filename, lines):
"""Makes sure the #includes are in proper order and no disallows files are
#included.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
"""
lint = []
include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
include_support_re = re.compile(r'^#include "(Support/.*)"')
include_config_re = re.compile(r'^#include "(Config/.*)"')
include_system_re = re.compile(r'^#include <(.*)>')
DISALLOWED_SYSTEM_HEADERS = ['iostream']
line_num = 1
prev_config_header = None
prev_system_header = None
for line in lines:
# TODO: implement private headers
# TODO: implement gtest headers
# TODO: implement top-level llvm/* headers
# TODO: implement llvm/Support/* headers
# Process Config/* headers
config_header = include_config_re.match(line)
if config_header:
curr_config_header = config_header.group(1)
      if prev_config_header:
        if prev_config_header > curr_config_header:
          lint.append((filename, line_num,
                       'Config headers not in order: "%s" before "%s"' % (
                         prev_config_header, curr_config_header)))
      prev_config_header = curr_config_header
# Process system headers
system_header = include_system_re.match(line)
if system_header:
curr_system_header = system_header.group(1)
# Is it blacklisted?
if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
lint.append((filename, line_num,
'Disallowed system header: <%s>' % curr_system_header))
elif prev_system_header:
# Make sure system headers are alphabetized amongst themselves
if prev_system_header > curr_system_header:
lint.append((filename, line_num,
'System headers not in order: <%s> before <%s>' % (
prev_system_header, curr_system_header)))
prev_system_header = curr_system_header
line_num += 1
return lint
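# A minimal unittest sketch for VerifyIncludes, following the TODO at the top of
# this file. The file name, input lines and expected counts below are
# illustrative assumptions, not part of the checked-in linter.
import unittest
class VerifyIncludesTest(unittest.TestCase):
  def test_disallowed_system_header(self):
    lint = VerifyIncludes('foo.cpp', ['#include <iostream>'])
    self.assertEqual(len(lint), 1)
    self.assertEqual(lint[0][1], 1)  # violation reported on line 1
  def test_system_headers_out_of_order(self):
    lint = VerifyIncludes('foo.cpp', ['#include <vector>', '#include <map>'])
    self.assertEqual(len(lint), 1)  # <vector> sorts after <map>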
class CppLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
lint = []
lint.extend(VerifyIncludes(filename, lines))
lint.extend(common_lint.VerifyLineLength(filename, lines,
CppLint.MAX_LINE_LENGTH))
lint.extend(common_lint.VerifyTabs(filename, lines))
lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
return lint
def CppLintMain(filenames):
all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
for lint in all_lint:
print '%s:%d:%s' % (lint[0], lint[1], lint[2])
return 0
if __name__ == '__main__':
sys.exit(CppLintMain(sys.argv[1:]))
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lint/generic_lint.py | #!/usr/bin/python
#
# Checks files to make sure they conform to LLVM standards which can be applied
# to any programming language: at present, line length and trailing whitespace.
import common_lint
import sys
class GenericCodeLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
  def RunOnFile(self, filename, lines):
    lint = []
    lint.extend(common_lint.VerifyLineLength(filename, lines,
                                             GenericCodeLint.MAX_LINE_LENGTH))
    lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
    return lint
def GenericCodeLintMain(filenames):
  all_lint = common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
  for lint in all_lint:
    print '%s:%d:%s' % (lint[0], lint[1], lint[2])
  return 0
if __name__ == '__main__':
sys.exit(GenericCodeLintMain(sys.argv[1:]))
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lint/remove_trailing_whitespace.sh | #!/bin/sh
# Deletes trailing whitespace in-place in the passed-in files.
# Sample syntax:
# $0 *.cpp
perl -pi -e 's/\s+$/\n/' $*
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lint/common_lint.py | #!/usr/bin/python
#
# Common lint functions applicable to multiple types of files.
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
    A list of tuples with format [(filename, line number, msg), ...] with any
    violations found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
    if tab_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
    if trailing_whitespace_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
  def RunOnFile(self, filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
    linter: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
    try:
      file = open(filename, 'r')
    except IOError:
      print 'Could not open %s' % filename
      continue
lines = file.readlines()
lint.extend(linter.RunOnFile(filename, lines))
return lint
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/testgen/mc-bundling-x86-gen.py | #!/usr/bin/python
# Auto-generates an exhaustive and repetitive test for correct bundle-locked
# alignment on x86.
# For every possible offset in an aligned bundle, a bundle-locked group of every
# size in the inclusive range [1, bundle_size] is inserted. An appropriate CHECK
# is added to verify that NOP padding occurred (or did not occur) as expected.
# Run with --align-to-end to generate a similar test with align_to_end for each
# .bundle_lock directive.
# This script runs with Python 2.7 and 3.2+
from __future__ import print_function
import argparse
BUNDLE_SIZE_POW2 = 4
BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2
PREAMBLE = '''
# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\
# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!!
# It tests that bundle-aligned grouping works correctly in MC. Read the
# source of the script for more details.
.text
.bundle_align_mode {0}
'''.format(BUNDLE_SIZE_POW2).lstrip()
ALIGNTO = ' .align {0}, 0x90'
NOPFILL = ' .fill {0}, 1, 0x90'
def print_bundle_locked_sequence(len, align_to_end=False):
print(' .bundle_lock{0}'.format(' align_to_end' if align_to_end else ''))
print(' .rept {0}'.format(len))
print(' inc %eax')
print(' .endr')
print(' .bundle_unlock')
def generate(align_to_end=False):
print(PREAMBLE)
ntest = 0
for instlen in range(1, BUNDLE_SIZE + 1):
for offset in range(0, BUNDLE_SIZE):
# Spread out all the instructions to not worry about cross-bundle
# interference.
print(ALIGNTO.format(2 * BUNDLE_SIZE))
print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset))
if offset > 0:
print(NOPFILL.format(offset))
print_bundle_locked_sequence(instlen, align_to_end)
# Now generate an appropriate CHECK line
base_offset = ntest * 2 * BUNDLE_SIZE
inst_orig_offset = base_offset + offset # had it not been padded...
def print_check(adjusted_offset=None, nop_split_offset=None):
if adjusted_offset is not None:
print('# CHECK: {0:x}: nop'.format(inst_orig_offset))
if nop_split_offset is not None:
print('# CHECK: {0:x}: nop'.format(nop_split_offset))
print('# CHECK: {0:x}: incl'.format(adjusted_offset))
else:
print('# CHECK: {0:x}: incl'.format(inst_orig_offset))
if align_to_end:
if offset + instlen == BUNDLE_SIZE:
# No padding needed
print_check()
elif offset + instlen < BUNDLE_SIZE:
# Pad to end at nearest bundle boundary
offset_to_end = base_offset + (BUNDLE_SIZE - instlen)
print_check(offset_to_end)
else: # offset + instlen > BUNDLE_SIZE
# Pad to end at next bundle boundary, splitting the nop sequence
# at the nearest bundle boundary
offset_to_nearest_bundle = base_offset + BUNDLE_SIZE
offset_to_end = base_offset + (BUNDLE_SIZE * 2 - instlen)
if offset_to_nearest_bundle == offset_to_end:
offset_to_nearest_bundle = None
print_check(offset_to_end, offset_to_nearest_bundle)
else:
if offset + instlen > BUNDLE_SIZE:
# Padding needed
aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
print_check(aligned_offset)
else:
# No padding needed
print_check()
print()
ntest += 1
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--align-to-end',
action='store_true',
help='generate .bundle_lock with align_to_end option')
args = argparser.parse_args()
generate(align_to_end=args.align_to_end)
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lit/setup.py | import lit
import os
from setuptools import setup, find_packages
# setuptools expects to be invoked from within the directory of setup.py, but it
# is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "lit",
version = lit.__version__,
author = lit.__author__,
author_email = lit.__email__,
url = 'http://llvm.org',
license = 'BSD',
description = "A Software Testing Tool",
keywords = 'test C++ automatic discovery',
long_description = """\
*lit*
+++++
About
=====
*lit* is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. *lit* is
designed to be a lightweight testing tool with as simple a user interface as
possible.
Features
========
* Portable!
* Flexible test discovery.
* Parallel test execution.
* Support for multiple test formats and test suite designs.
Documentation
=============
The official *lit* documentation is in the man page, available online at the LLVM
Command Guide: http://llvm.org/cmds/lit.html.
Source
======
The *lit* source is available as part of LLVM, in the LLVM SVN repository:
http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit.
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
zip_safe = False,
packages = find_packages(),
entry_points = {
'console_scripts': [
'lit = lit:main',
],
}
)
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lit/lit.py | #!/usr/bin/env python
if __name__=='__main__':
import lit
lit.main()
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lit/MANIFEST.in | include TODO lit.py
recursive-include tests *
recursive-include examples *
global-exclude *pyc
global-exclude *~
prune tests/Output
prune tests/*/Output
prune tests/*/*/Output
prune tests/*/*/*/Output
|
0 | repos/DirectXShaderCompiler/utils | repos/DirectXShaderCompiler/utils/lit/README.txt | ===============================
lit - A Software Testing Tool
===============================
lit is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. lit is designed
to be a lightweight testing tool with as simple a user interface as possible.
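A typical invocation (illustrative) runs a directory of tests with verbose
output:
  lit -v path/to/test/suite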
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/test-data.py | # Test features related to formats which support reporting additional test data.
# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s
# CHECK: -- Testing:
# CHECK: PASS: test-data :: metrics.ini
# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/shtest-shell.py | # Check the internal shell handling component of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-shell :: error-0.txt
# CHECK: *** TEST 'shtest-shell :: error-0.txt' FAILED ***
# CHECK: Command 0: "not-a-real-command"
# CHECK: Command 0 Result: 127
# CHECK: Command 0 Stderr:
# CHECK: 'not-a-real-command': command not found
# CHECK: ***
# FIXME: The output here sucks.
#
# CHECK: FAIL: shtest-shell :: error-1.txt
# CHECK: *** TEST 'shtest-shell :: error-1.txt' FAILED ***
# CHECK: shell parser error on: 'echo "missing quote'
# CHECK: ***
# CHECK: FAIL: shtest-shell :: error-2.txt
# CHECK: *** TEST 'shtest-shell :: error-2.txt' FAILED ***
# CHECK: Unsupported redirect:
# CHECK: ***
# CHECK: PASS: shtest-shell :: redirects.txt
# CHECK: PASS: shtest-shell :: sequencing-0.txt
# CHECK: XFAIL: shtest-shell :: sequencing-1.txt
# CHECK: Failing Tests (3)
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/progress-bar.py | # Check the simple progress bar.
#
# RUN: not %{lit} -j 1 -s %{inputs}/progress-bar > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: Testing: 0 .. 10.. 20
# CHECK: FAIL: shtest-shell :: test-1.txt (1 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40..
# CHECK: FAIL: shtest-shell :: test-2.txt (2 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70
# CHECK: FAIL: shtest-shell :: test-3.txt (3 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
# CHECK: FAIL: shtest-shell :: test-4.txt (4 of 4)
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/shell-parsing.py | # Just run the ShUtil unit tests.
#
# RUN: %{python} -m lit.ShUtil
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/shtest-if-else.py | # RUN: %{lit} -v --show-all %{inputs}/shtest-if-else/test.txt \
# RUN: | FileCheck %{inputs}/shtest-if-else/test.txt
# RUN: not %{lit} -v %{inputs}/shtest-if-else/test-neg1.txt 2>&1 \
# RUN: | FileCheck %{inputs}/shtest-if-else/test-neg1.txt
# RUN: not %{lit} -v %{inputs}/shtest-if-else/test-neg2.txt 2>&1 \
# RUN: | FileCheck %{inputs}/shtest-if-else/test-neg2.txt
# RUN: not %{lit} -v %{inputs}/shtest-if-else/test-neg3.txt 2>&1 \
# RUN: | FileCheck %{inputs}/shtest-if-else/test-neg3.txt
# RUN: not %{lit} -v %{inputs}/shtest-if-else/test-neg4.txt 2>&1 \
# RUN: | FileCheck %{inputs}/shtest-if-else/test-neg4.txt
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/usage.py | # Basic sanity check that usage works.
#
# RUN: %{lit} --help > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: Usage: lit.py [options] {file-or-path}
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/googletest-format.py | # Check the various features of the GoogleTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/FirstTest.subTestA
# CHECK: FAIL: googletest-format :: DummySubDir/OneTest/FirstTest.subTestB
# CHECK-NEXT: *** TEST 'googletest-format :: DummySubDir/OneTest/FirstTest.subTestB' FAILED ***
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: ***
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/1.subTest
# CHECK: Failing Tests (1)
# CHECK: Expected Passes : 3
# CHECK: Unexpected Failures: 1
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/discovery.py | # Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
#
# CHECK-BASIC-ERR: loading suite config '{{.*}}/discovery/lit.cfg'
# CHECK-BASIC-ERR: loading suite config '{{.*}}/discovery/subsuite/lit.cfg'
# CHECK-BASIC-ERR: loading local config '{{.*}}/discovery/subdir/lit.local.cfg'
#
# CHECK-BASIC-OUT: -- Test Suites --
# CHECK-BASIC-OUT: sub-suite - 2 tests
# CHECK-BASIC-OUT: Source Root: {{.*/discovery/subsuite$}}
# CHECK-BASIC-OUT: Exec Root : {{.*/discovery/subsuite$}}
# CHECK-BASIC-OUT: top-level-suite - 3 tests
# CHECK-BASIC-OUT: Source Root: {{.*/discovery$}}
# CHECK-BASIC-OUT: Exec Root : {{.*/discovery$}}
#
# CHECK-BASIC-OUT: -- Available Tests --
# CHECK-BASIC-OUT: sub-suite :: test-one
# CHECK-BASIC-OUT: sub-suite :: test-two
# CHECK-BASIC-OUT: top-level-suite :: subdir/test-three
# CHECK-BASIC-OUT: top-level-suite :: test-one
# CHECK-BASIC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-EXACT-TEST < %t.out %s
#
# CHECK-EXACT-TEST: -- Available Tests --
# CHECK-EXACT-TEST: sub-suite :: test-one
# CHECK-EXACT-TEST: top-level-suite :: subdir/test-three
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
# CHECK-ASEXEC-ERR: loading suite config '{{.*}}/exec-discovery/lit.site.cfg'
# CHECK-ASEXEC-ERR: load_config from '{{.*}}/discovery/lit.cfg'
# CHECK-ASEXEC-ERR: loaded config '{{.*}}/discovery/lit.cfg'
# CHECK-ASEXEC-ERR: loaded config '{{.*}}/exec-discovery/lit.site.cfg'
# CHECK-ASEXEC-ERR: loading suite config '{{.*}}/discovery/subsuite/lit.cfg'
# CHECK-ASEXEC-ERR: loading local config '{{.*}}/discovery/subdir/lit.local.cfg'
#
# CHECK-ASEXEC-OUT: -- Test Suites --
# CHECK-ASEXEC-OUT: sub-suite - 2 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*/discovery/subsuite$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*/discovery/subsuite$}}
# CHECK-ASEXEC-OUT: top-level-suite - 3 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*/discovery$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*/exec-discovery$}}
#
# CHECK-ASEXEC-OUT: -- Available Tests --
# CHECK-ASEXEC-OUT: sub-suite :: test-one
# CHECK-ASEXEC-OUT: sub-suite :: test-two
# CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three
# CHECK-ASEXEC-OUT: top-level-suite :: test-one
# CHECK-ASEXEC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
# FIXME: Note that using a path into a subsuite doesn't work correctly here.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-EXACT-TEST < %t.out %s
#
# CHECK-ASEXEC-EXACT-TEST: -- Available Tests --
# CHECK-ASEXEC-EXACT-TEST: top-level-suite :: subdir/test-three
# Check that we don't recurse infinitely when loading an site specific test
# suite located inside the test source root.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*/exec-discovery-in-tree$}}
# CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*/exec-discovery-in-tree/obj$}}
# CHECK-ASEXEC-INTREE-NEXT: -- Available Tests --
# CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/xunit-output.py | # Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml --xml-include-test-output %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='2' failures='0'>
# CHECK: <testcase classname='test-data.test-data' name='metrics.ini' time='0.{{[0-9]+}}'>
# CHECK: <system-out>
# CHECK: Test passed.
# CHECK: </system-out>
# CHECK: <testcase classname='test-data.test-data' name='utf8_output_message.ini' time='0.{{[0-9]+}}'>
# CHECK: <system-out>
# CHECK: This test is 🔥
# CHECK: </system-out>
# CHECK: </testcase>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/test-output.py | # RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: }
# CHECK-NEXT: "name": "test-data :: metrics.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/shtest-format.py | # Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: shtest-format :: argv0.txt
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat: does-not-exist: No such file or directory
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: Command 0: "printf"
# CHECK-NEXT: Command 0 Result: 0
# CHECK-NEXT: Command 0 Output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
# CHECK-NEXT: Script
# CHECK-NEXT: --
# CHECK-NEXT: true
# CHECK-NEXT: --
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 4
# CHECK: Expected Failures : 3
# CHECK: Unsupported Tests : 2
# CHECK: Unresolved Tests : 1
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
|
0 | repos/DirectXShaderCompiler/utils/lit | repos/DirectXShaderCompiler/utils/lit/tests/unittest-adaptor.py | # Check the lit adaption to run under unittest.
#
# RUN: %{python} %s %{inputs}/unittest-adaptor 2> %t.err
# RUN: FileCheck < %t.err %s
#
# CHECK-DAG: unittest-adaptor :: test-two.txt ... FAIL
# CHECK-DAG: unittest-adaptor :: test-one.txt ... ok
import unittest
import sys
import lit
import lit.discovery
input_path = sys.argv[1]
unittest_suite = lit.discovery.load_test_suite([input_path])
runner = unittest.TextTestRunner(verbosity=2)
runner.run(unittest_suite)
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/unittest-adaptor/test-one.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/unittest-adaptor/test-two.txt | # RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/xfail-target.txt | RUN: false
XFAIL: x86_64
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/xfail-feature.txt | # RUN: false
# XFAIL: a-present-feature
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/argv0.txt | # Check that we route argv[0] as it was written, instead of the resolved
# path. This is important for some tools, in particular '[' which at least on OS
# X only recognizes that it is in '['-mode when its argv[0] is exactly
# '['. Otherwise it will refuse to accept the trailing closing bracket.
#
# RUN: [ "A" = "A" ]
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/requires-missing.txt | RUN: true
REQUIRES: a-missing-feature
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/xpass.txt | RUN: true
XFAIL: x86_64
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/fail.txt | # RUN: printf "line 1: failed test output on stdout\nline 2: failed test output on stdout"
# RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/xfail.txt | RUN: false
XFAIL: *
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/pass.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/requires-present.txt | RUN: true
REQUIRES: a-present-feature
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/no-test-line.txt | # Empty!
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt | # Run a command that fails with error on stdout.
#
# RUN: echo "line 1: failed test output on stdout"
# RUN: echo "line 2: failed test output on stdout"
# RUN: cat "does-not-exist"
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/external_shell/pass.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/external_shell/fail_with_bad_encoding.txt | # Run a command that fails with error on stdout.
#
# RUN: %S/write-bad-encoding.sh
# RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-format/unsupported_dir/some-test.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery/test-one.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery/test-two.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery/subdir/test-three.py | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery/subsuite/test-one.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/discovery/subsuite/test-two.txt | # RUN: true
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/test-data/utf8_output_message.ini | [global]
result_code = PASS
result_output = This test is 🔥
[results]
value0 = 1
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/test-data/metrics.ini | [global]
result_code = PASS
result_output = Test passed.
[results]
value0 = 1
value1 = 2.3456 |
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-if-else/test-neg4.txt | # CHECK: ValueError: '%}' is missing for %else substitution
#
# RUN: %if feature %{ echo %} %else %{ fail
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-if-else/test-neg2.txt | # CHECK: ValueError: '%}' is missing for %if substitution
#
# RUN: %if feature %{ echo
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-if-else/test.txt | # CHECK: -- Testing:{{.*}}
# CHECK-NEXT: PASS: shtest-if-else :: test.txt (1 of 1)
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# RUN: %if feature %{ echo "feature" %} %else %{ echo "missing feature" %}
# CHECK-NEXT: echo "feature"
#
# RUN: %if nofeature %{ echo "found unicorn" %} %else %{ echo "nofeature" %}
# CHECK-NEXT: "nofeature"
# CHECK-NOT: found unicorn
# Spaces inside curly braces are not ignored
#
# RUN: echo test-%if feature %{ 3 %} %else %{ echo "fail" %}-test
# RUN: echo test-%if feature %{ 4 4 %} %else %{ echo "fail" %}-test
# RUN: echo test-%if nofeature %{ echo "fail" %} %else %{ 5 5 %}-test
# CHECK-NEXT: echo test- 3 -test
# CHECK-NOT: echo "fail"
# CHECK-NEXT: echo test- 4 4 -test
# CHECK-NOT: echo "fail"
# CHECK-NEXT: echo test- 5 5 -test
# CHECK-NOT: echo "fail"
# Escape line breaks for multi-line expressions
#
# RUN: %if feature \
# RUN: %{ echo \
# RUN: "test-5" \
# RUN: %} %else %{ echo "fail" %}
# CHECK-NEXT: echo "test-5"
# RUN: %if nofeature \
# RUN: %{ echo "fail" %} \
# RUN: %else \
# RUN: %{ echo "test-6" %}
# CHECK-NEXT: echo "test-6"
# RUN: echo "test%if feature %{%} %else %{%}-7"
# CHECK-NEXT: echo "test-7"
# Nested expressions are supported:
#
# RUN: echo %if feature %{ %if feature %{ %if nofeature %{"fail"%} %else %{"test-9"%} %} %}
# CHECK-NEXT: echo "test-9"
# Spaces between %if and %else are ignored. If there is no %else -
# space after %if %{...%} is not ignored.
#
# RUN: echo XX %if feature %{YY%} ZZ
# RUN: echo AA %if feature %{BB%} %else %{CC%} DD
# RUN: echo AA %if nofeature %{BB%} %else %{CC%} DD
# CHECK-NEXT: echo XX YY ZZ
# CHECK-NEXT: echo AA BB DD
# CHECK-NEXT: echo AA CC DD
# '{' and '}' can be used without escaping
#
# RUN: %if feature %{echo {}%}
# CHECK-NEXT: echo {}
# Spaces are not required
#
# RUN: echo %if feature%{"ok"%}%else%{"fail"%}
# CHECK-NEXT: echo "ok"
# Substitutions with braces are handled correctly
#
# RUN: echo %{sub} %if feature%{test-%{sub}%}%else%{"fail"%}
# CHECK-NEXT: echo ok test-ok
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 0
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-if-else/test-neg1.txt | # CHECK: ValueError: '%{' is missing for %if substitution
#
# RUN: %if feature echo "test-1"
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-if-else/test-neg3.txt | # CHECK: ValueError: '%{' is missing for %else substitution
#
# RUN: %if feature %{ echo %} %else fail
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/progress-bar/test-1.txt | # RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/progress-bar/test-4.txt | # RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/progress-bar/test-3.txt | # RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/progress-bar/test-2.txt | # RUN: false
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-shell/write-to-stdout-and-stderr.sh | #!/bin/sh
echo "a line on stdout"
echo "a line on stderr" 1>&2
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-shell/sequencing-1.txt | # RUN: false && true
# XFAIL: *
|
0 | repos/DirectXShaderCompiler/utils/lit/tests/Inputs | repos/DirectXShaderCompiler/utils/lit/tests/Inputs/shtest-shell/sequencing-0.txt | # Check sequencing operations.
#
# RUN: echo "first-line" > %t.out && echo "second-line" >> %t.out
# RUN: FileCheck --check-prefix CHECK-AND < %t.out %s
#
# CHECK-AND: first-line
# CHECK-AND: second-line
#
# The false case of && is tested in sequencing-2.txt
# RUN: echo "first-line" > %t.out || echo "second-line" >> %t.out
# RUN: FileCheck --check-prefix CHECK-OR-1 < %t.out %s
#
# CHECK-OR-1: first-line
# CHECK-OR-1-NOT: second-line
# RUN: false || echo "second-line" > %t.out
# RUN: FileCheck --check-prefix CHECK-OR-2 < %t.out %s
#
# CHECK-OR-2: second-line
# RUN: echo "first-line" > %t.out; echo "second-line" >> %t.out
# RUN: FileCheck --check-prefix CHECK-SEQ < %t.out %s
#
# CHECK-SEQ: first-line
# CHECK-SEQ: second-line
|