Dataset columns: Unnamed: 0 (int64, min 0, max 0), repo_id (string, length 5-186), file_path (string, length 15-223), content (string, length 1-32.8M).
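The three columns are enough to materialize each row back onto disk as a file of the snapshot. The sketch below is illustrative only: it assumes the rows have been exported to a Parquet file, and both the file name data.parquet and the output directory restored/ are hypothetical.

import os

import pandas as pd

# Each row of the dump describes one file of the repository snapshot:
#   repo_id   - directory the file belongs to (e.g. repos/DirectXShaderCompiler/lib/CodeGen)
#   file_path - full path of the file inside the snapshot
#   content   - raw file text (per the schema above, up to ~32.8M characters)
df = pd.read_parquet("data.parquet")  # hypothetical export of the rows shown here

for row in df.itertuples(index=False):
    out_path = os.path.join("restored", row.file_path)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(row.content)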
Unnamed: 0: 0
repo_id: repos/DirectXShaderCompiler/lib/CodeGen
file_path: repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/CMakeLists.txt
content:
add_llvm_library(LLVMAsmPrinter
  AddressPool.cpp
  ARMException.cpp
  AsmPrinter.cpp
  AsmPrinterDwarf.cpp
  AsmPrinterInlineAsm.cpp
  DbgValueHistoryCalculator.cpp
  DebugLocStream.cpp
  DIE.cpp
  DIEHash.cpp
  DwarfAccelTable.cpp
  DwarfCFIException.cpp
  DwarfCompileUnit.cpp
  DwarfDebug.cpp
  DwarfExpression.cpp
  DwarfFile.cpp
  DwarfStringPool.cpp
  DwarfUnit.cpp
  EHStreamer.cpp
  ErlangGCPrinter.cpp
  OcamlGCPrinter.cpp
  WinException.cpp
  WinCodeViewLineTables.cpp
  )

add_dependencies(LLVMAsmPrinter intrinsics_gen)
Unnamed: 0: 0
repo_id: repos/DirectXShaderCompiler/lib/CodeGen
file_path: repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
content:
//===-- AsmPrinter.cpp - Common AsmPrinter code ---------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the AsmPrinter class. // //===----------------------------------------------------------------------===// #include "llvm/CodeGen/AsmPrinter.h" #include "DwarfDebug.h" #include "DwarfException.h" #include "WinException.h" #include "WinCodeViewLineTables.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/GCMetadataPrinter.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBundle.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineModuleInfoImpls.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbolELF.h" #include "llvm/MC/MCValue.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/Timer.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; #define DEBUG_TYPE "asm-printer" static const char *const DWARFGroupName = "DWARF Emission"; static const char *const DbgTimerName = "Debug Info Emission"; static const char *const EHTimerName = "DWARF Exception Writer"; static const char *const CodeViewLineTablesGroupName = "CodeView Line Tables"; STATISTIC(EmittedInsts, "Number of machine instrs printed"); char AsmPrinter::ID = 0; typedef DenseMap<GCStrategy*, std::unique_ptr<GCMetadataPrinter>> gcp_map_type; static gcp_map_type &getGCMap(void *&P) { if (!P) P = new gcp_map_type(); return *(gcp_map_type*)P; } /// getGVAlignmentLog2 - Return the alignment to use for the specified global /// value in log2 form. This rounds up to the preferred alignment if possible /// and legal. static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &DL, unsigned InBits = 0) { unsigned NumBits = 0; if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) NumBits = DL.getPreferredAlignmentLog(GVar); // If InBits is specified, round it to it. if (InBits > NumBits) NumBits = InBits; // If the GV has a specified alignment, take it into account. if (GV->getAlignment() == 0) return NumBits; unsigned GVAlign = Log2_32(GV->getAlignment()); // If the GVAlign is larger than NumBits, or if we are required to obey // NumBits because the GV has an assigned section, obey it. 
if (GVAlign > NumBits || GV->hasSection()) NumBits = GVAlign; return NumBits; } AsmPrinter::AsmPrinter(TargetMachine &tm, std::unique_ptr<MCStreamer> Streamer) : MachineFunctionPass(ID), TM(tm), MAI(tm.getMCAsmInfo()), OutContext(Streamer->getContext()), OutStreamer(std::move(Streamer)), LastMI(nullptr), LastFn(0), Counter(~0U) { DD = nullptr; MMI = nullptr; LI = nullptr; MF = nullptr; CurExceptionSym = CurrentFnSym = CurrentFnSymForSize = nullptr; CurrentFnBegin = nullptr; CurrentFnEnd = nullptr; GCMetadataPrinters = nullptr; VerboseAsm = OutStreamer->isVerboseAsm(); } AsmPrinter::~AsmPrinter() { assert(!DD && Handlers.empty() && "Debug/EH info didn't get finalized"); if (GCMetadataPrinters) { gcp_map_type &GCMap = getGCMap(GCMetadataPrinters); delete &GCMap; GCMetadataPrinters = nullptr; } } /// getFunctionNumber - Return a unique ID for the current function. /// unsigned AsmPrinter::getFunctionNumber() const { return MF->getFunctionNumber(); } const TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const { return *TM.getObjFileLowering(); } /// getDataLayout - Return information about data layout. const DataLayout &AsmPrinter::getDataLayout() const { return *TM.getDataLayout(); } const MCSubtargetInfo &AsmPrinter::getSubtargetInfo() const { assert(MF && "getSubtargetInfo requires a valid MachineFunction!"); return MF->getSubtarget<MCSubtargetInfo>(); } void AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) { S.EmitInstruction(Inst, getSubtargetInfo()); } StringRef AsmPrinter::getTargetTriple() const { return TM.getTargetTriple().str(); } /// getCurrentSection() - Return the current section we are emitting to. const MCSection *AsmPrinter::getCurrentSection() const { return OutStreamer->getCurrentSection().first; } void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); MachineFunctionPass::getAnalysisUsage(AU); AU.addRequired<MachineModuleInfo>(); AU.addRequired<GCModuleInfo>(); if (isVerbose()) AU.addRequired<MachineLoopInfo>(); } bool AsmPrinter::doInitialization(Module &M) { MMI = getAnalysisIfAvailable<MachineModuleInfo>(); // Initialize TargetLoweringObjectFile. const_cast<TargetLoweringObjectFile&>(getObjFileLowering()) .Initialize(OutContext, TM); OutStreamer->InitSections(false); Mang = new Mangler(); // Emit the version-min deplyment target directive if needed. // // FIXME: If we end up with a collection of these sorts of Darwin-specific // or ELF-specific things, it may make sense to have a platform helper class // that will work with the target helper class. For now keep it here, as the // alternative is duplicated code in each of the target asm printers that // use the directive, where it would need the same conditionalization // anyway. Triple TT(getTargetTriple()); if (TT.isOSDarwin()) { unsigned Major, Minor, Update; TT.getOSVersion(Major, Minor, Update); // If there is a version specified, Major will be non-zero. if (Major) OutStreamer->EmitVersionMin((TT.isMacOSX() ? MCVM_OSXVersionMin : MCVM_IOSVersionMin), Major, Minor, Update); } // Allow the target to emit any magic that it wants at the start of the file. EmitStartOfAsmFile(M); // Very minimal debug info. It is ignored if we emit actual debug info. If we // don't, this at least helps the user find where a global came from. 
if (MAI->hasSingleParameterDotFile()) { // .file "foo.c" OutStreamer->EmitFileDirective(M.getModuleIdentifier()); } GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>(); assert(MI && "AsmPrinter didn't require GCModuleInfo?"); for (auto &I : *MI) if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*I)) MP->beginAssembly(M, *MI, *this); // Emit module-level inline asm if it exists. if (!M.getModuleInlineAsm().empty()) { // We're at the module level. Construct MCSubtarget from the default CPU // and target triple. std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo( TM.getTargetTriple().str(), TM.getTargetCPU(), TM.getTargetFeatureString())); OutStreamer->AddComment("Start of file scope inline assembly"); OutStreamer->AddBlankLine(); EmitInlineAsm(M.getModuleInlineAsm()+"\n", *STI, TM.Options.MCOptions); OutStreamer->AddComment("End of file scope inline assembly"); OutStreamer->AddBlankLine(); } if (MAI->doesSupportDebugInformation()) { bool skip_dwarf = false; if (TM.getTargetTriple().isKnownWindowsMSVCEnvironment()) { Handlers.push_back(HandlerInfo(new WinCodeViewLineTables(this), DbgTimerName, CodeViewLineTablesGroupName)); // FIXME: Don't emit DWARF debug info if there's at least one function // with AddressSanitizer instrumentation. // This is a band-aid fix for PR22032. for (auto &F : M.functions()) { if (F.hasFnAttribute(Attribute::SanitizeAddress)) { skip_dwarf = true; break; } } } if (!skip_dwarf) { DD = new DwarfDebug(this, &M); Handlers.push_back(HandlerInfo(DD, DbgTimerName, DWARFGroupName)); } } EHStreamer *ES = nullptr; switch (MAI->getExceptionHandlingType()) { case ExceptionHandling::None: break; case ExceptionHandling::SjLj: case ExceptionHandling::DwarfCFI: ES = new DwarfCFIException(this); break; case ExceptionHandling::ARM: ES = new ARMException(this); break; case ExceptionHandling::WinEH: switch (MAI->getWinEHEncodingType()) { default: llvm_unreachable("unsupported unwinding information encoding"); case WinEH::EncodingType::Invalid: break; case WinEH::EncodingType::X86: case WinEH::EncodingType::Itanium: ES = new WinException(this); break; } break; } if (ES) Handlers.push_back(HandlerInfo(ES, EHTimerName, DWARFGroupName)); return false; } static bool canBeHidden(const GlobalValue *GV, const MCAsmInfo &MAI) { if (!MAI.hasWeakDefCanBeHiddenDirective()) return false; return canBeOmittedFromSymbolTable(GV); } void AsmPrinter::EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const { GlobalValue::LinkageTypes Linkage = GV->getLinkage(); switch (Linkage) { case GlobalValue::CommonLinkage: case GlobalValue::LinkOnceAnyLinkage: case GlobalValue::LinkOnceODRLinkage: case GlobalValue::WeakAnyLinkage: case GlobalValue::WeakODRLinkage: if (MAI->hasWeakDefDirective()) { // .globl _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Global); if (!canBeHidden(GV, *MAI)) // .weak_definition _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_WeakDefinition); else OutStreamer->EmitSymbolAttribute(GVSym, MCSA_WeakDefAutoPrivate); } else if (MAI->hasLinkOnceDirective()) { // .globl _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Global); //NOTE: linkonce is handled by the section the symbol was assigned to. } else { // .weak _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Weak); } return; case GlobalValue::AppendingLinkage: // FIXME: appending linkage variables should go into a section of // their name or something. For now, just emit them as external. case GlobalValue::ExternalLinkage: // If external or appending, declare as a global symbol. 
// .globl _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Global); return; case GlobalValue::PrivateLinkage: case GlobalValue::InternalLinkage: return; case GlobalValue::AvailableExternallyLinkage: llvm_unreachable("Should never emit this"); case GlobalValue::ExternalWeakLinkage: llvm_unreachable("Don't know how to emit these"); } llvm_unreachable("Unknown linkage type!"); } void AsmPrinter::getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV) const { TM.getNameWithPrefix(Name, GV, *Mang); } MCSymbol *AsmPrinter::getSymbol(const GlobalValue *GV) const { return TM.getSymbol(GV, *Mang); } /// EmitGlobalVariable - Emit the specified global variable to the .s file. void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { if (GV->hasInitializer()) { // Check to see if this is a special global used by LLVM, if so, emit it. if (EmitSpecialLLVMGlobal(GV)) return; // Skip the emission of global equivalents. The symbol can be emitted later // on by emitGlobalGOTEquivs in case it turns out to be needed. if (GlobalGOTEquivs.count(getSymbol(GV))) return; if (isVerbose()) { GV->printAsOperand(OutStreamer->GetCommentOS(), /*PrintType=*/false, GV->getParent()); OutStreamer->GetCommentOS() << '\n'; } } MCSymbol *GVSym = getSymbol(GV); EmitVisibility(GVSym, GV->getVisibility(), !GV->isDeclaration()); if (!GV->hasInitializer()) // External globals require no extra code. return; GVSym->redefineIfPossible(); if (GVSym->isDefined() || GVSym->isVariable()) report_fatal_error("symbol '" + Twine(GVSym->getName()) + "' is already defined"); if (MAI->hasDotTypeDotSizeDirective()) OutStreamer->EmitSymbolAttribute(GVSym, MCSA_ELF_TypeObject); SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM); const DataLayout *DL = TM.getDataLayout(); uint64_t Size = DL->getTypeAllocSize(GV->getType()->getElementType()); // If the alignment is specified, we *must* obey it. Overaligning a global // with a specified alignment is a prompt way to break globals emitted to // sections and expected to be contiguous (e.g. ObjC metadata). unsigned AlignLog = getGVAlignmentLog2(GV, *DL); for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->setSymbolSize(GVSym, Size); } // Handle common and BSS local symbols (.lcomm). if (GVKind.isCommon() || GVKind.isBSSLocal()) { if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it. unsigned Align = 1 << AlignLog; // Handle common symbols. if (GVKind.isCommon()) { if (!getObjFileLowering().getCommDirectiveSupportsAlignment()) Align = 0; // .comm _foo, 42, 4 OutStreamer->EmitCommonSymbol(GVSym, Size, Align); return; } // Handle local BSS symbols. if (MAI->hasMachoZeroFillDirective()) { MCSection *TheSection = getObjFileLowering().SectionForGlobal(GV, GVKind, *Mang, TM); // .zerofill __DATA, __bss, _foo, 400, 5 OutStreamer->EmitZerofill(TheSection, GVSym, Size, Align); return; } // Use .lcomm only if it supports user-specified alignment. // Otherwise, while it would still be correct to use .lcomm in some // cases (e.g. when Align == 1), the external assembler might enfore // some -unknown- default alignment behavior, which could cause // spurious differences between external and integrated assembler. // Prefer to simply fall back to .local / .comm in this case. 
if (MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) { // .lcomm _foo, 42 OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Align); return; } if (!getObjFileLowering().getCommDirectiveSupportsAlignment()) Align = 0; // .local _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Local); // .comm _foo, 42, 4 OutStreamer->EmitCommonSymbol(GVSym, Size, Align); return; } MCSection *TheSection = getObjFileLowering().SectionForGlobal(GV, GVKind, *Mang, TM); // Handle the zerofill directive on darwin, which is a special form of BSS // emission. if (GVKind.isBSSExtern() && MAI->hasMachoZeroFillDirective()) { if (Size == 0) Size = 1; // zerofill of 0 bytes is undefined. // .globl _foo OutStreamer->EmitSymbolAttribute(GVSym, MCSA_Global); // .zerofill __DATA, __common, _foo, 400, 5 OutStreamer->EmitZerofill(TheSection, GVSym, Size, 1 << AlignLog); return; } // Handle thread local data for mach-o which requires us to output an // additional structure of data and mangle the original symbol so that we // can reference it later. // // TODO: This should become an "emit thread local global" method on TLOF. // All of this macho specific stuff should be sunk down into TLOFMachO and // stuff like "TLSExtraDataSection" should no longer be part of the parent // TLOF class. This will also make it more obvious that stuff like // MCStreamer::EmitTBSSSymbol is macho specific and only called from macho // specific code. if (GVKind.isThreadLocal() && MAI->hasMachoTBSSDirective()) { // Emit the .tbss symbol MCSymbol *MangSym = OutContext.getOrCreateSymbol(GVSym->getName() + Twine("$tlv$init")); if (GVKind.isThreadBSS()) { TheSection = getObjFileLowering().getTLSBSSSection(); OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, 1 << AlignLog); } else if (GVKind.isThreadData()) { OutStreamer->SwitchSection(TheSection); EmitAlignment(AlignLog, GV); OutStreamer->EmitLabel(MangSym); EmitGlobalConstant(GV->getInitializer()); } OutStreamer->AddBlankLine(); // Emit the variable struct for the runtime. MCSection *TLVSect = getObjFileLowering().getTLSExtraDataSection(); OutStreamer->SwitchSection(TLVSect); // Emit the linkage here. EmitLinkage(GV, GVSym); OutStreamer->EmitLabel(GVSym); // Three pointers in size: // - __tlv_bootstrap - used to make sure support exists // - spare pointer, used when mapped by the runtime // - pointer to mangled symbol above with initializer unsigned PtrSize = DL->getPointerTypeSize(GV->getType()); OutStreamer->EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"), PtrSize); OutStreamer->EmitIntValue(0, PtrSize); OutStreamer->EmitSymbolValue(MangSym, PtrSize); OutStreamer->AddBlankLine(); return; } OutStreamer->SwitchSection(TheSection); EmitLinkage(GV, GVSym); EmitAlignment(AlignLog, GV); OutStreamer->EmitLabel(GVSym); EmitGlobalConstant(GV->getInitializer()); if (MAI->hasDotTypeDotSizeDirective()) // .size foo, 42 OutStreamer->emitELFSize(cast<MCSymbolELF>(GVSym), MCConstantExpr::create(Size, OutContext)); OutStreamer->AddBlankLine(); } /// EmitFunctionHeader - This method emits the header for the current /// function. void AsmPrinter::EmitFunctionHeader() { // Print out constants referenced by the function EmitConstantPool(); // Print the 'header' of function. 
const Function *F = MF->getFunction(); OutStreamer->SwitchSection( getObjFileLowering().SectionForGlobal(F, *Mang, TM)); EmitVisibility(CurrentFnSym, F->getVisibility()); EmitLinkage(F, CurrentFnSym); if (MAI->hasFunctionAlignment()) EmitAlignment(MF->getAlignment(), F); if (MAI->hasDotTypeDotSizeDirective()) OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction); if (isVerbose()) { F->printAsOperand(OutStreamer->GetCommentOS(), /*PrintType=*/false, F->getParent()); OutStreamer->GetCommentOS() << '\n'; } // Emit the prefix data. if (F->hasPrefixData()) EmitGlobalConstant(F->getPrefixData()); // Emit the CurrentFnSym. This is a virtual function to allow targets to // do their wild and crazy things as required. EmitFunctionEntryLabel(); // If the function had address-taken blocks that got deleted, then we have // references to the dangling symbols. Emit them at the start of the function // so that we don't get references to undefined symbols. std::vector<MCSymbol*> DeadBlockSyms; MMI->takeDeletedSymbolsForFunction(F, DeadBlockSyms); for (unsigned i = 0, e = DeadBlockSyms.size(); i != e; ++i) { OutStreamer->AddComment("Address taken block that was later removed"); OutStreamer->EmitLabel(DeadBlockSyms[i]); } if (CurrentFnBegin) { if (MAI->useAssignmentForEHBegin()) { MCSymbol *CurPos = OutContext.createTempSymbol(); OutStreamer->EmitLabel(CurPos); OutStreamer->EmitAssignment(CurrentFnBegin, MCSymbolRefExpr::create(CurPos, OutContext)); } else { OutStreamer->EmitLabel(CurrentFnBegin); } } // Emit pre-function debug and/or EH information. for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->beginFunction(MF); } // Emit the prologue data. if (F->hasPrologueData()) EmitGlobalConstant(F->getPrologueData()); } /// EmitFunctionEntryLabel - Emit the label that is the entrypoint for the /// function. This can be overridden by targets as required to do custom stuff. void AsmPrinter::EmitFunctionEntryLabel() { CurrentFnSym->redefineIfPossible(); // The function label could have already been emitted if two symbols end up // conflicting due to asm renaming. Detect this and emit an error. if (CurrentFnSym->isVariable()) report_fatal_error("'" + Twine(CurrentFnSym->getName()) + "' is a protected alias"); if (CurrentFnSym->isDefined()) report_fatal_error("'" + Twine(CurrentFnSym->getName()) + "' label emitted multiple times to assembly file"); return OutStreamer->EmitLabel(CurrentFnSym); } /// emitComments - Pretty-print comments for instructions. static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) { const MachineFunction *MF = MI.getParent()->getParent(); const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); // Check for spills and reloads int FI; const MachineFrameInfo *FrameInfo = MF->getFrameInfo(); // We assume a single instruction only has a spill or reload, not // both. 
const MachineMemOperand *MMO; if (TII->isLoadFromStackSlotPostFE(&MI, FI)) { if (FrameInfo->isSpillSlotObjectIndex(FI)) { MMO = *MI.memoperands_begin(); CommentOS << MMO->getSize() << "-byte Reload\n"; } } else if (TII->hasLoadFromStackSlot(&MI, MMO, FI)) { if (FrameInfo->isSpillSlotObjectIndex(FI)) CommentOS << MMO->getSize() << "-byte Folded Reload\n"; } else if (TII->isStoreToStackSlotPostFE(&MI, FI)) { if (FrameInfo->isSpillSlotObjectIndex(FI)) { MMO = *MI.memoperands_begin(); CommentOS << MMO->getSize() << "-byte Spill\n"; } } else if (TII->hasStoreToStackSlot(&MI, MMO, FI)) { if (FrameInfo->isSpillSlotObjectIndex(FI)) CommentOS << MMO->getSize() << "-byte Folded Spill\n"; } // Check for spill-induced copies if (MI.getAsmPrinterFlag(MachineInstr::ReloadReuse)) CommentOS << " Reload Reuse\n"; } /// emitImplicitDef - This method emits the specified machine instruction /// that is an implicit def. void AsmPrinter::emitImplicitDef(const MachineInstr *MI) const { unsigned RegNo = MI->getOperand(0).getReg(); OutStreamer->AddComment(Twine("implicit-def: ") + MMI->getContext().getRegisterInfo()->getName(RegNo)); OutStreamer->AddBlankLine(); } static void emitKill(const MachineInstr *MI, AsmPrinter &AP) { std::string Str = "kill:"; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &Op = MI->getOperand(i); assert(Op.isReg() && "KILL instruction must have only register operands"); Str += ' '; Str += AP.MMI->getContext().getRegisterInfo()->getName(Op.getReg()); Str += (Op.isDef() ? "<def>" : "<kill>"); } AP.OutStreamer->AddComment(Str); AP.OutStreamer->AddBlankLine(); } /// emitDebugValueComment - This method handles the target-independent form /// of DBG_VALUE, returning true if it was able to do so. A false return /// means the target will need to handle MI in EmitInstruction. static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { // This code handles only the 4-operand target-independent form. if (MI->getNumOperands() != 4) return false; SmallString<128> Str; raw_svector_ostream OS(Str); OS << "DEBUG_VALUE: "; const DILocalVariable *V = MI->getDebugVariable(); if (auto *SP = dyn_cast<DISubprogram>(V->getScope())) { StringRef Name = SP->getDisplayName(); if (!Name.empty()) OS << Name << ":"; } OS << V->getName(); const DIExpression *Expr = MI->getDebugExpression(); if (Expr->isBitPiece()) OS << " [bit_piece offset=" << Expr->getBitPieceOffset() << " size=" << Expr->getBitPieceSize() << "]"; OS << " <- "; // The second operand is only an offset if it's an immediate. bool Deref = MI->getOperand(0).isReg() && MI->getOperand(1).isImm(); int64_t Offset = Deref ? MI->getOperand(1).getImm() : 0; // Register or immediate value. Register 0 means undef. if (MI->getOperand(0).isFPImm()) { APFloat APF = APFloat(MI->getOperand(0).getFPImm()->getValueAPF()); if (MI->getOperand(0).getFPImm()->getType()->isFloatTy()) { OS << (double)APF.convertToFloat(); } else if (MI->getOperand(0).getFPImm()->getType()->isDoubleTy()) { OS << APF.convertToDouble(); } else { // There is no good way to print long double. Convert a copy to // double. Ah well, it's only a comment. 
bool ignored; APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored); OS << "(long double) " << APF.convertToDouble(); } } else if (MI->getOperand(0).isImm()) { OS << MI->getOperand(0).getImm(); } else if (MI->getOperand(0).isCImm()) { MI->getOperand(0).getCImm()->getValue().print(OS, false /*isSigned*/); } else { unsigned Reg; if (MI->getOperand(0).isReg()) { Reg = MI->getOperand(0).getReg(); } else { assert(MI->getOperand(0).isFI() && "Unknown operand type"); const TargetFrameLowering *TFI = AP.MF->getSubtarget().getFrameLowering(); Offset += TFI->getFrameIndexReference(*AP.MF, MI->getOperand(0).getIndex(), Reg); Deref = true; } if (Reg == 0) { // Suppress offset, it is not meaningful here. OS << "undef"; // NOTE: Want this comment at start of line, don't emit with AddComment. AP.OutStreamer->emitRawComment(OS.str()); return true; } if (Deref) OS << '['; OS << AP.MMI->getContext().getRegisterInfo()->getName(Reg); } if (Deref) OS << '+' << Offset << ']'; // NOTE: Want this comment at start of line, don't emit with AddComment. AP.OutStreamer->emitRawComment(OS.str()); return true; } AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() { if (MAI->getExceptionHandlingType() == ExceptionHandling::DwarfCFI && MF->getFunction()->needsUnwindTableEntry()) return CFI_M_EH; if (MMI->hasDebugInfo()) return CFI_M_Debug; return CFI_M_None; } bool AsmPrinter::needsSEHMoves() { return MAI->usesWindowsCFI() && MF->getFunction()->needsUnwindTableEntry(); } void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) { ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType(); if (ExceptionHandlingType != ExceptionHandling::DwarfCFI && ExceptionHandlingType != ExceptionHandling::ARM) return; if (needsCFIMoves() == CFI_M_None) return; const MachineModuleInfo &MMI = MF->getMMI(); const std::vector<MCCFIInstruction> &Instrs = MMI.getFrameInstructions(); unsigned CFIIndex = MI.getOperand(0).getCFIIndex(); const MCCFIInstruction &CFI = Instrs[CFIIndex]; emitCFIInstruction(CFI); } void AsmPrinter::emitFrameAlloc(const MachineInstr &MI) { // The operands are the MCSymbol and the frame offset of the allocation. MCSymbol *FrameAllocSym = MI.getOperand(0).getMCSymbol(); int FrameOffset = MI.getOperand(1).getImm(); // Emit a symbol assignment. OutStreamer->EmitAssignment(FrameAllocSym, MCConstantExpr::create(FrameOffset, OutContext)); } /// EmitFunctionBody - This method emits the body and trailer for a /// function. void AsmPrinter::EmitFunctionBody() { EmitFunctionHeader(); // Emit target-specific gunk before the function body. EmitFunctionBodyStart(); bool ShouldPrintDebugScopes = MMI->hasDebugInfo(); // Print out code for the function. bool HasAnyRealCode = false; for (auto &MBB : *MF) { // Print a label for the basic block. EmitBasicBlockStart(MBB); for (auto &MI : MBB) { // Print the assembly for the instruction. 
if (!MI.isPosition() && !MI.isImplicitDef() && !MI.isKill() && !MI.isDebugValue()) { HasAnyRealCode = true; ++EmittedInsts; } if (ShouldPrintDebugScopes) { for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->beginInstruction(&MI); } } if (isVerbose()) emitComments(MI, OutStreamer->GetCommentOS()); switch (MI.getOpcode()) { case TargetOpcode::CFI_INSTRUCTION: emitCFIInstruction(MI); break; case TargetOpcode::LOCAL_ESCAPE: emitFrameAlloc(MI); break; case TargetOpcode::EH_LABEL: case TargetOpcode::GC_LABEL: OutStreamer->EmitLabel(MI.getOperand(0).getMCSymbol()); break; case TargetOpcode::INLINEASM: EmitInlineAsm(&MI); break; case TargetOpcode::DBG_VALUE: if (isVerbose()) { if (!emitDebugValueComment(&MI, *this)) EmitInstruction(&MI); } break; case TargetOpcode::IMPLICIT_DEF: if (isVerbose()) emitImplicitDef(&MI); break; case TargetOpcode::KILL: if (isVerbose()) emitKill(&MI, *this); break; default: EmitInstruction(&MI); break; } if (ShouldPrintDebugScopes) { for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->endInstruction(); } } } EmitBasicBlockEnd(MBB); } // If the function is empty and the object file uses .subsections_via_symbols, // then we need to emit *something* to the function body to prevent the // labels from collapsing together. Just emit a noop. if ((MAI->hasSubsectionsViaSymbols() && !HasAnyRealCode)) { MCInst Noop; MF->getSubtarget().getInstrInfo()->getNoopForMachoTarget(Noop); OutStreamer->AddComment("avoids zero-length function"); // Targets can opt-out of emitting the noop here by leaving the opcode // unspecified. if (Noop.getOpcode()) OutStreamer->EmitInstruction(Noop, getSubtargetInfo()); } const Function *F = MF->getFunction(); for (const auto &BB : *F) { if (!BB.hasAddressTaken()) continue; MCSymbol *Sym = GetBlockAddressSymbol(&BB); if (Sym->isDefined()) continue; OutStreamer->AddComment("Address of block that was removed by CodeGen"); OutStreamer->EmitLabel(Sym); } // Emit target-specific gunk after the function body. EmitFunctionBodyEnd(); if (!MMI->getLandingPads().empty() || MMI->hasDebugInfo() || MAI->hasDotTypeDotSizeDirective()) { // Create a symbol for the end of function. CurrentFnEnd = createTempSymbol("func_end"); OutStreamer->EmitLabel(CurrentFnEnd); } // If the target wants a .size directive for the size of the function, emit // it. if (MAI->hasDotTypeDotSizeDirective()) { // We can get the size as difference between the function label and the // temp label. const MCExpr *SizeExp = MCBinaryExpr::createSub( MCSymbolRefExpr::create(CurrentFnEnd, OutContext), MCSymbolRefExpr::create(CurrentFnSymForSize, OutContext), OutContext); if (auto Sym = dyn_cast<MCSymbolELF>(CurrentFnSym)) OutStreamer->emitELFSize(Sym, SizeExp); } for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->markFunctionEnd(); } // Print out jump tables referenced by the function. EmitJumpTableInfo(); // Emit post-function debug and/or EH information. for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->endFunction(MF); } MMI->EndFunction(); OutStreamer->AddBlankLine(); } /// \brief Compute the number of Global Variables that uses a Constant. 
static unsigned getNumGlobalVariableUses(const Constant *C) { if (!C) return 0; if (isa<GlobalVariable>(C)) return 1; unsigned NumUses = 0; for (auto *CU : C->users()) NumUses += getNumGlobalVariableUses(dyn_cast<Constant>(CU)); return NumUses; } /// \brief Only consider global GOT equivalents if at least one user is a /// cstexpr inside an initializer of another global variables. Also, don't /// handle cstexpr inside instructions. During global variable emission, /// candidates are skipped and are emitted later in case at least one cstexpr /// isn't replaced by a PC relative GOT entry access. static bool isGOTEquivalentCandidate(const GlobalVariable *GV, unsigned &NumGOTEquivUsers) { // Global GOT equivalents are unnamed private globals with a constant // pointer initializer to another global symbol. They must point to a // GlobalVariable or Function, i.e., as GlobalValue. if (!GV->hasUnnamedAddr() || !GV->hasInitializer() || !GV->isConstant() || !GV->isDiscardableIfUnused() || !dyn_cast<GlobalValue>(GV->getOperand(0))) return false; // To be a got equivalent, at least one of its users need to be a constant // expression used by another global variable. for (auto *U : GV->users()) NumGOTEquivUsers += getNumGlobalVariableUses(dyn_cast<Constant>(U)); return NumGOTEquivUsers > 0; } /// \brief Unnamed constant global variables solely contaning a pointer to /// another globals variable is equivalent to a GOT table entry; it contains the /// the address of another symbol. Optimize it and replace accesses to these /// "GOT equivalents" by using the GOT entry for the final global instead. /// Compute GOT equivalent candidates among all global variables to avoid /// emitting them if possible later on, after it use is replaced by a GOT entry /// access. void AsmPrinter::computeGlobalGOTEquivs(Module &M) { if (!getObjFileLowering().supportIndirectSymViaGOTPCRel()) return; for (const auto &G : M.globals()) { unsigned NumGOTEquivUsers = 0; if (!isGOTEquivalentCandidate(&G, NumGOTEquivUsers)) continue; const MCSymbol *GOTEquivSym = getSymbol(&G); GlobalGOTEquivs[GOTEquivSym] = std::make_pair(&G, NumGOTEquivUsers); } } /// \brief Constant expressions using GOT equivalent globals may not be eligible /// for PC relative GOT entry conversion, in such cases we need to emit such /// globals we previously omitted in EmitGlobalVariable. void AsmPrinter::emitGlobalGOTEquivs() { if (!getObjFileLowering().supportIndirectSymViaGOTPCRel()) return; SmallVector<const GlobalVariable *, 8> FailedCandidates; for (auto &I : GlobalGOTEquivs) { const GlobalVariable *GV = I.second.first; unsigned Cnt = I.second.second; if (Cnt) FailedCandidates.push_back(GV); } GlobalGOTEquivs.clear(); for (auto *GV : FailedCandidates) EmitGlobalVariable(GV); } bool AsmPrinter::doFinalization(Module &M) { // Set the MachineFunction to nullptr so that we can catch attempted // accesses to MF specific features at the module level and so that // we can conditionalize accesses based on whether or not it is nullptr. MF = nullptr; // Gather all GOT equivalent globals in the module. We really need two // passes over the globals: one to compute and another to avoid its emission // in EmitGlobalVariable, otherwise we would not be able to handle cases // where the got equivalent shows up before its use. computeGlobalGOTEquivs(M); // Emit global variables. for (const auto &G : M.globals()) EmitGlobalVariable(&G); // Emit remaining GOT equivalent globals. 
emitGlobalGOTEquivs(); // Emit visibility info for declarations for (const Function &F : M) { if (!F.isDeclarationForLinker()) continue; GlobalValue::VisibilityTypes V = F.getVisibility(); if (V == GlobalValue::DefaultVisibility) continue; MCSymbol *Name = getSymbol(&F); EmitVisibility(Name, V, false); } const TargetLoweringObjectFile &TLOF = getObjFileLowering(); // Emit module flags. SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags; M.getModuleFlagsMetadata(ModuleFlags); if (!ModuleFlags.empty()) TLOF.emitModuleFlags(*OutStreamer, ModuleFlags, *Mang, TM); if (TM.getTargetTriple().isOSBinFormatELF()) { MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>(); // Output stubs for external and common global variables. MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList(); if (!Stubs.empty()) { OutStreamer->SwitchSection(TLOF.getDataRelSection()); const DataLayout *DL = TM.getDataLayout(); for (const auto &Stub : Stubs) { OutStreamer->EmitLabel(Stub.first); OutStreamer->EmitSymbolValue(Stub.second.getPointer(), DL->getPointerSize()); } } } // Make sure we wrote out everything we need. OutStreamer->Flush(); // Finalize debug and EH information. for (const HandlerInfo &HI : Handlers) { NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled); HI.Handler->endModule(); delete HI.Handler; } Handlers.clear(); DD = nullptr; // If the target wants to know about weak references, print them all. if (MAI->getWeakRefDirective()) { // FIXME: This is not lazy, it would be nice to only print weak references // to stuff that is actually used. Note that doing so would require targets // to notice uses in operands (due to constant exprs etc). This should // happen with the MC stuff eventually. // Print out module-level global variables here. for (const auto &G : M.globals()) { if (!G.hasExternalWeakLinkage()) continue; OutStreamer->EmitSymbolAttribute(getSymbol(&G), MCSA_WeakReference); } for (const auto &F : M) { if (!F.hasExternalWeakLinkage()) continue; OutStreamer->EmitSymbolAttribute(getSymbol(&F), MCSA_WeakReference); } } OutStreamer->AddBlankLine(); for (const auto &Alias : M.aliases()) { MCSymbol *Name = getSymbol(&Alias); if (Alias.hasExternalLinkage() || !MAI->getWeakRefDirective()) OutStreamer->EmitSymbolAttribute(Name, MCSA_Global); else if (Alias.hasWeakLinkage() || Alias.hasLinkOnceLinkage()) OutStreamer->EmitSymbolAttribute(Name, MCSA_WeakReference); else assert(Alias.hasLocalLinkage() && "Invalid alias linkage"); EmitVisibility(Name, Alias.getVisibility()); // Emit the directives as assignments aka .set: OutStreamer->EmitAssignment(Name, lowerConstant(Alias.getAliasee())); } GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>(); assert(MI && "AsmPrinter didn't require GCModuleInfo?"); for (GCModuleInfo::iterator I = MI->end(), E = MI->begin(); I != E; ) if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(**--I)) MP->finishAssembly(M, *MI, *this); // Emit llvm.ident metadata in an '.ident' directive. EmitModuleIdents(M); // Emit __morestack address if needed for indirect calls. 
if (MMI->usesMorestackAddr()) { MCSection *ReadOnlySection = getObjFileLowering().getSectionForConstant(SectionKind::getReadOnly(), /*C=*/nullptr); OutStreamer->SwitchSection(ReadOnlySection); MCSymbol *AddrSymbol = OutContext.getOrCreateSymbol(StringRef("__morestack_addr")); OutStreamer->EmitLabel(AddrSymbol); unsigned PtrSize = TM.getDataLayout()->getPointerSize(0); OutStreamer->EmitSymbolValue(GetExternalSymbolSymbol("__morestack"), PtrSize); } // If we don't have any trampolines, then we don't require stack memory // to be executable. Some targets have a directive to declare this. Function *InitTrampolineIntrinsic = M.getFunction("llvm.init.trampoline"); if (!InitTrampolineIntrinsic || InitTrampolineIntrinsic->use_empty()) if (MCSection *S = MAI->getNonexecutableStackSection(OutContext)) OutStreamer->SwitchSection(S); // Allow the target to emit any magic that it wants at the end of the file, // after everything else has gone out. EmitEndOfAsmFile(M); delete Mang; Mang = nullptr; MMI = nullptr; OutStreamer->Finish(); OutStreamer->reset(); return false; } MCSymbol *AsmPrinter::getCurExceptionSym() { if (!CurExceptionSym) CurExceptionSym = createTempSymbol("exception"); return CurExceptionSym; } void AsmPrinter::SetupMachineFunction(MachineFunction &MF) { this->MF = &MF; // Get the function symbol. CurrentFnSym = getSymbol(MF.getFunction()); CurrentFnSymForSize = CurrentFnSym; CurrentFnBegin = nullptr; CurExceptionSym = nullptr; bool NeedsLocalForSize = MAI->needsLocalForSize(); if (!MMI->getLandingPads().empty() || MMI->hasDebugInfo() || NeedsLocalForSize) { CurrentFnBegin = createTempSymbol("func_begin"); if (NeedsLocalForSize) CurrentFnSymForSize = CurrentFnBegin; } if (isVerbose()) LI = &getAnalysis<MachineLoopInfo>(); } namespace { // Keep track the alignment, constpool entries per Section. struct SectionCPs { MCSection *S; unsigned Alignment; SmallVector<unsigned, 4> CPEs; SectionCPs(MCSection *s, unsigned a) : S(s), Alignment(a) {} }; } /// EmitConstantPool - Print to the current output stream assembly /// representations of the constants in the constant pool MCP. This is /// used to print out constants which have been "spilled to memory" by /// the code generator. /// void AsmPrinter::EmitConstantPool() { const MachineConstantPool *MCP = MF->getConstantPool(); const std::vector<MachineConstantPoolEntry> &CP = MCP->getConstants(); if (CP.empty()) return; // Calculate sections for constant pool entries. We collect entries to go into // the same section together to reduce amount of section switch statements. SmallVector<SectionCPs, 4> CPSections; for (unsigned i = 0, e = CP.size(); i != e; ++i) { const MachineConstantPoolEntry &CPE = CP[i]; unsigned Align = CPE.getAlignment(); SectionKind Kind = CPE.getSectionKind(TM.getDataLayout()); const Constant *C = nullptr; if (!CPE.isMachineConstantPoolEntry()) C = CPE.Val.ConstVal; MCSection *S = getObjFileLowering().getSectionForConstant(Kind, C); // The number of sections are small, just do a linear search from the // last section to the first. bool Found = false; unsigned SecIdx = CPSections.size(); while (SecIdx != 0) { if (CPSections[--SecIdx].S == S) { Found = true; break; } } if (!Found) { SecIdx = CPSections.size(); CPSections.push_back(SectionCPs(S, Align)); } if (Align > CPSections[SecIdx].Alignment) CPSections[SecIdx].Alignment = Align; CPSections[SecIdx].CPEs.push_back(i); } // Now print stuff into the calculated sections. 
const MCSection *CurSection = nullptr; unsigned Offset = 0; for (unsigned i = 0, e = CPSections.size(); i != e; ++i) { for (unsigned j = 0, ee = CPSections[i].CPEs.size(); j != ee; ++j) { unsigned CPI = CPSections[i].CPEs[j]; MCSymbol *Sym = GetCPISymbol(CPI); if (!Sym->isUndefined()) continue; if (CurSection != CPSections[i].S) { OutStreamer->SwitchSection(CPSections[i].S); EmitAlignment(Log2_32(CPSections[i].Alignment)); CurSection = CPSections[i].S; Offset = 0; } MachineConstantPoolEntry CPE = CP[CPI]; // Emit inter-object padding for alignment. unsigned AlignMask = CPE.getAlignment() - 1; unsigned NewOffset = (Offset + AlignMask) & ~AlignMask; OutStreamer->EmitZeros(NewOffset - Offset); Type *Ty = CPE.getType(); Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty); OutStreamer->EmitLabel(Sym); if (CPE.isMachineConstantPoolEntry()) EmitMachineConstantPoolValue(CPE.Val.MachineCPVal); else EmitGlobalConstant(CPE.Val.ConstVal); } } } /// EmitJumpTableInfo - Print assembly representations of the jump tables used /// by the current function to the current output stream. /// void AsmPrinter::EmitJumpTableInfo() { const DataLayout *DL = MF->getTarget().getDataLayout(); const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); if (!MJTI) return; if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline) return; const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); if (JT.empty()) return; // Pick the directive to use to print the jump table entries, and switch to // the appropriate section. const Function *F = MF->getFunction(); const TargetLoweringObjectFile &TLOF = getObjFileLowering(); bool JTInDiffSection = !TLOF.shouldPutJumpTableInFunctionSection( MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32, *F); if (JTInDiffSection) { // Drop it in the readonly section. MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(*F, *Mang, TM); OutStreamer->SwitchSection(ReadOnlySection); } EmitAlignment(Log2_32( MJTI->getEntryAlignment(*TM.getDataLayout()))); // Jump tables in code sections are marked with a data_region directive // where that's supported. if (!JTInDiffSection) OutStreamer->EmitDataRegion(MCDR_DataRegionJT32); for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) { const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs; // If this jump table was deleted, ignore it. if (JTBBs.empty()) continue; // For the EK_LabelDifference32 entry, if using .set avoids a relocation, /// emit a .set directive for each unique entry. if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 && MAI->doesSetDirectiveSuppressesReloc()) { SmallPtrSet<const MachineBasicBlock*, 16> EmittedSets; const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF,JTI,OutContext); for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) { const MachineBasicBlock *MBB = JTBBs[ii]; if (!EmittedSets.insert(MBB).second) continue; // .set LJTSet, LBB32-base const MCExpr *LHS = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext); OutStreamer->EmitAssignment(GetJTSetSymbol(JTI, MBB->getNumber()), MCBinaryExpr::createSub(LHS, Base, OutContext)); } } // On some targets (e.g. Darwin) we want to emit two consecutive labels // before each jump table. The first label is never referenced, but tells // the assembler and linker the extents of the jump table object. The // second label is actually referenced by the code. 
if (JTInDiffSection && DL->hasLinkerPrivateGlobalPrefix()) // FIXME: This doesn't have to have any specific name, just any randomly // named and numbered 'l' label would work. Simplify GetJTISymbol. OutStreamer->EmitLabel(GetJTISymbol(JTI, true)); OutStreamer->EmitLabel(GetJTISymbol(JTI)); for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) EmitJumpTableEntry(MJTI, JTBBs[ii], JTI); } if (!JTInDiffSection) OutStreamer->EmitDataRegion(MCDR_DataRegionEnd); } /// EmitJumpTableEntry - Emit a jump table entry for the specified MBB to the /// current stream. void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned UID) const { assert(MBB && MBB->getNumber() >= 0 && "Invalid basic block"); const MCExpr *Value = nullptr; switch (MJTI->getEntryKind()) { case MachineJumpTableInfo::EK_Inline: llvm_unreachable("Cannot emit EK_Inline jump table entry"); case MachineJumpTableInfo::EK_Custom32: Value = MF->getSubtarget().getTargetLowering()->LowerCustomJumpTableEntry( MJTI, MBB, UID, OutContext); break; case MachineJumpTableInfo::EK_BlockAddress: // EK_BlockAddress - Each entry is a plain address of block, e.g.: // .word LBB123 Value = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext); break; case MachineJumpTableInfo::EK_GPRel32BlockAddress: { // EK_GPRel32BlockAddress - Each entry is an address of block, encoded // with a relocation as gp-relative, e.g.: // .gprel32 LBB123 MCSymbol *MBBSym = MBB->getSymbol(); OutStreamer->EmitGPRel32Value(MCSymbolRefExpr::create(MBBSym, OutContext)); return; } case MachineJumpTableInfo::EK_GPRel64BlockAddress: { // EK_GPRel64BlockAddress - Each entry is an address of block, encoded // with a relocation as gp-relative, e.g.: // .gpdword LBB123 MCSymbol *MBBSym = MBB->getSymbol(); OutStreamer->EmitGPRel64Value(MCSymbolRefExpr::create(MBBSym, OutContext)); return; } case MachineJumpTableInfo::EK_LabelDifference32: { // Each entry is the address of the block minus the address of the jump // table. This is used for PIC jump tables where gprel32 is not supported. // e.g.: // .word LBB123 - LJTI1_2 // If the .set directive avoids relocations, this is emitted as: // .set L4_5_set_123, LBB123 - LJTI1_2 // .word L4_5_set_123 if (MAI->doesSetDirectiveSuppressesReloc()) { Value = MCSymbolRefExpr::create(GetJTSetSymbol(UID, MBB->getNumber()), OutContext); break; } Value = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext); const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF, UID, OutContext); Value = MCBinaryExpr::createSub(Value, Base, OutContext); break; } } assert(Value && "Unknown entry kind!"); unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout()); OutStreamer->EmitValue(Value, EntrySize); } /// EmitSpecialLLVMGlobal - Check to see if the specified global is a /// special global used by LLVM. If so, emit it and return true, otherwise /// do nothing and return false. bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) { if (GV->getName() == "llvm.used") { if (MAI->hasNoDeadStrip()) // No need to emit this at all. EmitLLVMUsedList(cast<ConstantArray>(GV->getInitializer())); return true; } // Ignore debug and non-emitted data. This handles llvm.compiler.used. 
if (StringRef(GV->getSection()) == "llvm.metadata" || GV->hasAvailableExternallyLinkage()) return true; if (!GV->hasAppendingLinkage()) return false; assert(GV->hasInitializer() && "Not a special LLVM global!"); if (GV->getName() == "llvm.global_ctors") { EmitXXStructorList(GV->getInitializer(), /* isCtor */ true); if (TM.getRelocationModel() == Reloc::Static && MAI->hasStaticCtorDtorReferenceInStaticMode()) { StringRef Sym(".constructors_used"); OutStreamer->EmitSymbolAttribute(OutContext.getOrCreateSymbol(Sym), MCSA_Reference); } return true; } if (GV->getName() == "llvm.global_dtors") { EmitXXStructorList(GV->getInitializer(), /* isCtor */ false); if (TM.getRelocationModel() == Reloc::Static && MAI->hasStaticCtorDtorReferenceInStaticMode()) { StringRef Sym(".destructors_used"); OutStreamer->EmitSymbolAttribute(OutContext.getOrCreateSymbol(Sym), MCSA_Reference); } return true; } return false; } /// EmitLLVMUsedList - For targets that define a MAI::UsedDirective, mark each /// global in the specified llvm.used list for which emitUsedDirectiveFor /// is true, as being used with this directive. void AsmPrinter::EmitLLVMUsedList(const ConstantArray *InitList) { // Should be an array of 'i8*'. for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) { const GlobalValue *GV = dyn_cast<GlobalValue>(InitList->getOperand(i)->stripPointerCasts()); if (GV) OutStreamer->EmitSymbolAttribute(getSymbol(GV), MCSA_NoDeadStrip); } } namespace { struct Structor { Structor() : Priority(0), Func(nullptr), ComdatKey(nullptr) {} int Priority; llvm::Constant *Func; llvm::GlobalValue *ComdatKey; }; } // end namespace /// EmitXXStructorList - Emit the ctor or dtor list taking into account the init /// priority. void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) { // Should be an array of '{ int, void ()* }' structs. The first value is the // init priority. if (!isa<ConstantArray>(List)) return; // Sanity check the structors list. const ConstantArray *InitList = dyn_cast<ConstantArray>(List); if (!InitList) return; // Not an array! StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType()); // FIXME: Only allow the 3-field form in LLVM 4.0. if (!ETy || ETy->getNumElements() < 2 || ETy->getNumElements() > 3) return; // Not an array of two or three elements! if (!isa<IntegerType>(ETy->getTypeAtIndex(0U)) || !isa<PointerType>(ETy->getTypeAtIndex(1U))) return; // Not (int, ptr). if (ETy->getNumElements() == 3 && !isa<PointerType>(ETy->getTypeAtIndex(2U))) return; // Not (int, ptr, ptr). // Gather the structors in a form that's convenient for sorting by priority. SmallVector<Structor, 8> Structors; for (Value *O : InitList->operands()) { ConstantStruct *CS = dyn_cast<ConstantStruct>(O); if (!CS) continue; // Malformed. if (CS->getOperand(1)->isNullValue()) break; // Found a null terminator, skip the rest. ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); if (!Priority) continue; // Malformed. 
Structors.push_back(Structor()); Structor &S = Structors.back(); S.Priority = Priority->getLimitedValue(65535); S.Func = CS->getOperand(1); if (ETy->getNumElements() == 3 && !CS->getOperand(2)->isNullValue()) S.ComdatKey = dyn_cast<GlobalValue>(CS->getOperand(2)->stripPointerCasts()); } // Emit the function pointers in the target-specific order const DataLayout *DL = TM.getDataLayout(); unsigned Align = Log2_32(DL->getPointerPrefAlignment()); std::stable_sort(Structors.begin(), Structors.end(), [](const Structor &L, const Structor &R) { return L.Priority < R.Priority; }); for (Structor &S : Structors) { const TargetLoweringObjectFile &Obj = getObjFileLowering(); const MCSymbol *KeySym = nullptr; if (GlobalValue *GV = S.ComdatKey) { if (GV->hasAvailableExternallyLinkage()) // If the associated variable is available_externally, some other TU // will provide its dynamic initializer. continue; KeySym = getSymbol(GV); } MCSection *OutputSection = (isCtor ? Obj.getStaticCtorSection(S.Priority, KeySym) : Obj.getStaticDtorSection(S.Priority, KeySym)); OutStreamer->SwitchSection(OutputSection); if (OutStreamer->getCurrentSection() != OutStreamer->getPreviousSection()) EmitAlignment(Align); EmitXXStructor(S.Func); } } void AsmPrinter::EmitModuleIdents(Module &M) { if (!MAI->hasIdentDirective()) return; if (const NamedMDNode *NMD = M.getNamedMetadata("llvm.ident")) { for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { const MDNode *N = NMD->getOperand(i); assert(N->getNumOperands() == 1 && "llvm.ident metadata entry can have only one operand"); const MDString *S = cast<MDString>(N->getOperand(0)); OutStreamer->EmitIdent(S->getString()); } } } //===--------------------------------------------------------------------===// // Emission and print routines // /// EmitInt8 - Emit a byte directive and value. /// void AsmPrinter::EmitInt8(int Value) const { OutStreamer->EmitIntValue(Value, 1); } /// EmitInt16 - Emit a short directive and value. /// void AsmPrinter::EmitInt16(int Value) const { OutStreamer->EmitIntValue(Value, 2); } /// EmitInt32 - Emit a long directive and value. /// void AsmPrinter::EmitInt32(int Value) const { OutStreamer->EmitIntValue(Value, 4); } /// Emit something like ".long Hi-Lo" where the size in bytes of the directive /// is specified by Size and Hi/Lo specify the labels. This implicitly uses /// .set if it avoids relocations. void AsmPrinter::EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo, unsigned Size) const { OutStreamer->emitAbsoluteSymbolDiff(Hi, Lo, Size); } /// EmitLabelPlusOffset - Emit something like ".long Label+Offset" /// where the size in bytes of the directive is specified by Size and Label /// specifies the label. This implicitly uses .set if it is available. void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset, unsigned Size, bool IsSectionRelative) const { if (MAI->needsDwarfSectionOffsetDirective() && IsSectionRelative) { OutStreamer->EmitCOFFSecRel32(Label); return; } // Emit Label+Offset (or just Label if Offset is zero) const MCExpr *Expr = MCSymbolRefExpr::create(Label, OutContext); if (Offset) Expr = MCBinaryExpr::createAdd( Expr, MCConstantExpr::create(Offset, OutContext), OutContext); OutStreamer->EmitValue(Expr, Size); } //===----------------------------------------------------------------------===// // EmitAlignment - Emit an alignment directive to the specified power of // two boundary. For example, if you pass in 3 here, you will get an 8 // byte alignment. 
If a global value is specified, and if that global has // an explicit alignment requested, it will override the alignment request // if required for correctness. // void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalObject *GV) const { if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits); if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment. assert(NumBits < static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && "undefined behavior"); if (getCurrentSection()->getKind().isText()) OutStreamer->EmitCodeAlignment(1u << NumBits); else OutStreamer->EmitValueToAlignment(1u << NumBits); } //===----------------------------------------------------------------------===// // Constant emission. //===----------------------------------------------------------------------===// const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) { MCContext &Ctx = OutContext; if (CV->isNullValue() || isa<UndefValue>(CV)) return MCConstantExpr::create(0, Ctx); if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) return MCConstantExpr::create(CI->getZExtValue(), Ctx); if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) return MCSymbolRefExpr::create(getSymbol(GV), Ctx); if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) return MCSymbolRefExpr::create(GetBlockAddressSymbol(BA), Ctx); const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV); if (!CE) { llvm_unreachable("Unknown constant value to lower!"); } if (const MCExpr *RelocExpr = getObjFileLowering().getExecutableRelativeSymbol(CE, *Mang, TM)) return RelocExpr; switch (CE->getOpcode()) { default: // If the code isn't optimized, there may be outstanding folding // opportunities. Attempt to fold the expression using DataLayout as a // last resort before giving up. if (Constant *C = ConstantFoldConstantExpression(CE, *TM.getDataLayout())) if (C != CE) return lowerConstant(C); // Otherwise report the problem to the user. { std::string S; raw_string_ostream OS(S); OS << "Unsupported expression in static initializer: "; CE->printAsOperand(OS, /*PrintType=*/false, !MF ? nullptr : MF->getFunction()->getParent()); report_fatal_error(OS.str()); } case Instruction::GetElementPtr: { const DataLayout &DL = *TM.getDataLayout(); // Generate a symbolic expression for the byte address APInt OffsetAI(DL.getPointerTypeSizeInBits(CE->getType()), 0); cast<GEPOperator>(CE)->accumulateConstantOffset(DL, OffsetAI); const MCExpr *Base = lowerConstant(CE->getOperand(0)); if (!OffsetAI) return Base; int64_t Offset = OffsetAI.getSExtValue(); return MCBinaryExpr::createAdd(Base, MCConstantExpr::create(Offset, Ctx), Ctx); } case Instruction::Trunc: // We emit the value and depend on the assembler to truncate the generated // expression properly. This is important for differences between // blockaddress labels. Since the two labels are in the same function, it // is reasonable to treat their delta as a 32-bit value. // FALL THROUGH. case Instruction::BitCast: return lowerConstant(CE->getOperand(0)); case Instruction::IntToPtr: { const DataLayout &DL = *TM.getDataLayout(); // Handle casts to pointers by changing them into casts to the appropriate // integer type. This promotes constant folding and simplifies this code. 
Constant *Op = CE->getOperand(0); Op = ConstantExpr::getIntegerCast(Op, DL.getIntPtrType(CV->getType()), false/*ZExt*/); return lowerConstant(Op); } case Instruction::PtrToInt: { const DataLayout &DL = *TM.getDataLayout(); // Support only foldable casts to/from pointers that can be eliminated by // changing the pointer to the appropriately sized integer type. Constant *Op = CE->getOperand(0); Type *Ty = CE->getType(); const MCExpr *OpExpr = lowerConstant(Op); // We can emit the pointer value into this slot if the slot is an // integer slot equal to the size of the pointer. if (DL.getTypeAllocSize(Ty) == DL.getTypeAllocSize(Op->getType())) return OpExpr; // Otherwise the pointer is smaller than the resultant integer, mask off // the high bits so we are sure to get a proper truncation if the input is // a constant expr. unsigned InBits = DL.getTypeAllocSizeInBits(Op->getType()); const MCExpr *MaskExpr = MCConstantExpr::create(~0ULL >> (64-InBits), Ctx); return MCBinaryExpr::createAnd(OpExpr, MaskExpr, Ctx); } // The MC library also has a right-shift operator, but it isn't consistently // signed or unsigned between different targets. case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::SDiv: case Instruction::SRem: case Instruction::Shl: case Instruction::And: case Instruction::Or: case Instruction::Xor: { const MCExpr *LHS = lowerConstant(CE->getOperand(0)); const MCExpr *RHS = lowerConstant(CE->getOperand(1)); switch (CE->getOpcode()) { default: llvm_unreachable("Unknown binary operator constant cast expr"); case Instruction::Add: return MCBinaryExpr::createAdd(LHS, RHS, Ctx); case Instruction::Sub: return MCBinaryExpr::createSub(LHS, RHS, Ctx); case Instruction::Mul: return MCBinaryExpr::createMul(LHS, RHS, Ctx); case Instruction::SDiv: return MCBinaryExpr::createDiv(LHS, RHS, Ctx); case Instruction::SRem: return MCBinaryExpr::createMod(LHS, RHS, Ctx); case Instruction::Shl: return MCBinaryExpr::createShl(LHS, RHS, Ctx); case Instruction::And: return MCBinaryExpr::createAnd(LHS, RHS, Ctx); case Instruction::Or: return MCBinaryExpr::createOr (LHS, RHS, Ctx); case Instruction::Xor: return MCBinaryExpr::createXor(LHS, RHS, Ctx); } } } } static void emitGlobalConstantImpl(const Constant *C, AsmPrinter &AP, const Constant *BaseCV = nullptr, uint64_t Offset = 0); /// isRepeatedByteSequence - Determine whether the given value is /// composed of a repeated sequence of identical bytes and return the /// byte value. If it is not a repeated sequence, return -1. static int isRepeatedByteSequence(const ConstantDataSequential *V) { StringRef Data = V->getRawDataValues(); assert(!Data.empty() && "Empty aggregates should be CAZ node"); char C = Data[0]; for (unsigned i = 1, e = Data.size(); i != e; ++i) if (Data[i] != C) return -1; return static_cast<uint8_t>(C); // Ensure 255 is not returned as -1. } /// isRepeatedByteSequence - Determine whether the given value is /// composed of a repeated sequence of identical bytes and return the /// byte value. If it is not a repeated sequence, return -1. static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) { uint64_t Size = TM.getDataLayout()->getTypeAllocSizeInBits(V->getType()); assert(Size % 8 == 0); // Extend the element to take zero padding into account. 
APInt Value = CI->getValue().zextOrSelf(Size); if (!Value.isSplat(8)) return -1; return Value.zextOrTrunc(8).getZExtValue(); } if (const ConstantArray *CA = dyn_cast<ConstantArray>(V)) { // Make sure all array elements are sequences of the same repeated // byte. assert(CA->getNumOperands() != 0 && "Should be a CAZ"); Constant *Op0 = CA->getOperand(0); int Byte = isRepeatedByteSequence(Op0, TM); if (Byte == -1) return -1; // All array elements must be equal. for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) if (CA->getOperand(i) != Op0) return -1; return Byte; } if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) return isRepeatedByteSequence(CDS); return -1; } static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS, AsmPrinter &AP){ // See if we can aggregate this into a .fill, if so, emit it as such. int Value = isRepeatedByteSequence(CDS, AP.TM); if (Value != -1) { uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize( CDS->getType()); // Don't emit a 1-byte object as a .fill. if (Bytes > 1) return AP.OutStreamer->EmitFill(Bytes, Value); } // If this can be emitted with .ascii/.asciz, emit it as such. if (CDS->isString()) return AP.OutStreamer->EmitBytes(CDS->getAsString()); // Otherwise, emit the values in successive locations. unsigned ElementByteSize = CDS->getElementByteSize(); if (isa<IntegerType>(CDS->getElementType())) { for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { if (AP.isVerbose()) AP.OutStreamer->GetCommentOS() << format("0x%" PRIx64 "\n", CDS->getElementAsInteger(i)); AP.OutStreamer->EmitIntValue(CDS->getElementAsInteger(i), ElementByteSize); } } else if (ElementByteSize == 4) { // FP Constants are printed as integer constants to avoid losing // precision. assert(CDS->getElementType()->isFloatTy()); for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { union { float F; uint32_t I; }; F = CDS->getElementAsFloat(i); if (AP.isVerbose()) AP.OutStreamer->GetCommentOS() << "float " << F << '\n'; AP.OutStreamer->EmitIntValue(I, 4); } } else { assert(CDS->getElementType()->isDoubleTy()); for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { union { double F; uint64_t I; }; F = CDS->getElementAsDouble(i); if (AP.isVerbose()) AP.OutStreamer->GetCommentOS() << "double " << F << '\n'; AP.OutStreamer->EmitIntValue(I, 8); } } const DataLayout &DL = *AP.TM.getDataLayout(); unsigned Size = DL.getTypeAllocSize(CDS->getType()); unsigned EmittedSize = DL.getTypeAllocSize(CDS->getType()->getElementType()) * CDS->getNumElements(); if (unsigned Padding = Size - EmittedSize) AP.OutStreamer->EmitZeros(Padding); } static void emitGlobalConstantArray(const ConstantArray *CA, AsmPrinter &AP, const Constant *BaseCV, uint64_t Offset) { // See if we can aggregate some values. Make sure it can be // represented as a series of bytes of the constant value. 
int Value = isRepeatedByteSequence(CA, AP.TM); const DataLayout &DL = *AP.TM.getDataLayout(); if (Value != -1) { uint64_t Bytes = DL.getTypeAllocSize(CA->getType()); AP.OutStreamer->EmitFill(Bytes, Value); } else { for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) { emitGlobalConstantImpl(CA->getOperand(i), AP, BaseCV, Offset); Offset += DL.getTypeAllocSize(CA->getOperand(i)->getType()); } } } static void emitGlobalConstantVector(const ConstantVector *CV, AsmPrinter &AP) { for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i) emitGlobalConstantImpl(CV->getOperand(i), AP); const DataLayout &DL = *AP.TM.getDataLayout(); unsigned Size = DL.getTypeAllocSize(CV->getType()); unsigned EmittedSize = DL.getTypeAllocSize(CV->getType()->getElementType()) * CV->getType()->getNumElements(); if (unsigned Padding = Size - EmittedSize) AP.OutStreamer->EmitZeros(Padding); } static void emitGlobalConstantStruct(const ConstantStruct *CS, AsmPrinter &AP, const Constant *BaseCV, uint64_t Offset) { // Print the fields in successive locations. Pad to align if needed! const DataLayout *DL = AP.TM.getDataLayout(); unsigned Size = DL->getTypeAllocSize(CS->getType()); const StructLayout *Layout = DL->getStructLayout(CS->getType()); uint64_t SizeSoFar = 0; for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) { const Constant *Field = CS->getOperand(i); // Print the actual field value. emitGlobalConstantImpl(Field, AP, BaseCV, Offset+SizeSoFar); // Check if padding is needed and insert one or more 0s. uint64_t FieldSize = DL->getTypeAllocSize(Field->getType()); uint64_t PadSize = ((i == e-1 ? Size : Layout->getElementOffset(i+1)) - Layout->getElementOffset(i)) - FieldSize; SizeSoFar += FieldSize + PadSize; // Insert padding - this may include padding to increase the size of the // current field up to the ABI size (if the struct is not packed) as well // as padding to ensure that the next field starts at the right offset. AP.OutStreamer->EmitZeros(PadSize); } assert(SizeSoFar == Layout->getSizeInBytes() && "Layout of constant struct may be incorrect!"); } static void emitGlobalConstantFP(const ConstantFP *CFP, AsmPrinter &AP) { APInt API = CFP->getValueAPF().bitcastToAPInt(); // First print a comment with what we think the original floating-point value // should have been. if (AP.isVerbose()) { SmallString<8> StrVal; CFP->getValueAPF().toString(StrVal); if (CFP->getType()) CFP->getType()->print(AP.OutStreamer->GetCommentOS()); else AP.OutStreamer->GetCommentOS() << "Printing <null> Type"; AP.OutStreamer->GetCommentOS() << ' ' << StrVal << '\n'; } // Now iterate through the APInt chunks, emitting them in endian-correct // order, possibly with a smaller chunk at beginning/end (e.g. for x87 80-bit // floats). unsigned NumBytes = API.getBitWidth() / 8; unsigned TrailingBytes = NumBytes % sizeof(uint64_t); const uint64_t *p = API.getRawData(); // PPC's long double has odd notions of endianness compared to how LLVM // handles it: p[0] goes first for *big* endian on PPC. 
if (AP.TM.getDataLayout()->isBigEndian() && !CFP->getType()->isPPC_FP128Ty()) { int Chunk = API.getNumWords() - 1; if (TrailingBytes) AP.OutStreamer->EmitIntValue(p[Chunk--], TrailingBytes); for (; Chunk >= 0; --Chunk) AP.OutStreamer->EmitIntValue(p[Chunk], sizeof(uint64_t)); } else { unsigned Chunk; for (Chunk = 0; Chunk < NumBytes / sizeof(uint64_t); ++Chunk) AP.OutStreamer->EmitIntValue(p[Chunk], sizeof(uint64_t)); if (TrailingBytes) AP.OutStreamer->EmitIntValue(p[Chunk], TrailingBytes); } // Emit the tail padding for the long double. const DataLayout &DL = *AP.TM.getDataLayout(); AP.OutStreamer->EmitZeros(DL.getTypeAllocSize(CFP->getType()) - DL.getTypeStoreSize(CFP->getType())); } static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) { const DataLayout *DL = AP.TM.getDataLayout(); unsigned BitWidth = CI->getBitWidth(); // Copy the value as we may massage the layout for constants whose bit width // is not a multiple of 64-bits. APInt Realigned(CI->getValue()); uint64_t ExtraBits = 0; unsigned ExtraBitsSize = BitWidth & 63; if (ExtraBitsSize) { // The bit width of the data is not a multiple of 64-bits. // The extra bits are expected to be at the end of the chunk of the memory. // Little endian: // * Nothing to be done, just record the extra bits to emit. // Big endian: // * Record the extra bits to emit. // * Realign the raw data to emit the chunks of 64-bits. if (DL->isBigEndian()) { // Basically the structure of the raw data is a chunk of 64-bits cells: // 0 1 BitWidth / 64 // [chunk1][chunk2] ... [chunkN]. // The most significant chunk is chunkN and it should be emitted first. // However, due to the alignment issue chunkN contains useless bits. // Realign the chunks so that they contain only useless information: // ExtraBits 0 1 (BitWidth / 64) - 1 // chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN] ExtraBits = Realigned.getRawData()[0] & (((uint64_t)-1) >> (64 - ExtraBitsSize)); Realigned = Realigned.lshr(ExtraBitsSize); } else ExtraBits = Realigned.getRawData()[BitWidth / 64]; } // We don't expect assemblers to support integer data directives // for more than 64 bits, so we emit the data in at most 64-bit // quantities at a time. const uint64_t *RawData = Realigned.getRawData(); for (unsigned i = 0, e = BitWidth / 64; i != e; ++i) { uint64_t Val = DL->isBigEndian() ? RawData[e - i - 1] : RawData[i]; AP.OutStreamer->EmitIntValue(Val, 8); } if (ExtraBitsSize) { // Emit the extra bits after the 64-bits chunks. // Emit a directive that fills the expected size. uint64_t Size = AP.TM.getDataLayout()->getTypeAllocSize( CI->getType()); Size -= (BitWidth / 64) * 8; assert(Size && Size * 8 >= ExtraBitsSize && (ExtraBits & (((uint64_t)-1) >> (64 - ExtraBitsSize))) == ExtraBits && "Directive too small for extra bits."); AP.OutStreamer->EmitIntValue(ExtraBits, Size); } } /// \brief Transform a not absolute MCExpr containing a reference to a GOT /// equivalent global, by a target specific GOT pc relative access to the /// final symbol. static void handleIndirectSymViaGOTPCRel(AsmPrinter &AP, const MCExpr **ME, const Constant *BaseCst, uint64_t Offset) { // The global @foo below illustrates a global that uses a got equivalent. // // @bar = global i32 42 // @gotequiv = private unnamed_addr constant i32* @bar // @foo = i32 trunc (i64 sub (i64 ptrtoint (i32** @gotequiv to i64), // i64 ptrtoint (i32* @foo to i64)) // to i32) // // The cstexpr in @foo is converted into the MCExpr `ME`, where we actually // check whether @foo is suitable to use a GOTPCREL. 
`ME` is usually in the // form: // // foo = cstexpr, where // cstexpr := <gotequiv> - "." + <cst> // cstexpr := <gotequiv> - (<foo> - <offset from @foo base>) + <cst> // // After canonicalization by evaluateAsRelocatable `ME` turns into: // // cstexpr := <gotequiv> - <foo> + gotpcrelcst, where // gotpcrelcst := <offset from @foo base> + <cst> // MCValue MV; if (!(*ME)->evaluateAsRelocatable(MV, nullptr, nullptr) || MV.isAbsolute()) return; const MCSymbolRefExpr *SymA = MV.getSymA(); if (!SymA) return; // Check that GOT equivalent symbol is cached. const MCSymbol *GOTEquivSym = &SymA->getSymbol(); if (!AP.GlobalGOTEquivs.count(GOTEquivSym)) return; const GlobalValue *BaseGV = dyn_cast<GlobalValue>(BaseCst); if (!BaseGV) return; // Check for a valid base symbol const MCSymbol *BaseSym = AP.getSymbol(BaseGV); const MCSymbolRefExpr *SymB = MV.getSymB(); if (!SymB || BaseSym != &SymB->getSymbol()) return; // Make sure to match: // // gotpcrelcst := <offset from @foo base> + <cst> // // If gotpcrelcst is positive it means that we can safely fold the pc rel // displacement into the GOTPCREL. We can also can have an extra offset <cst> // if the target knows how to encode it. // int64_t GOTPCRelCst = Offset + MV.getConstant(); if (GOTPCRelCst < 0) return; if (!AP.getObjFileLowering().supportGOTPCRelWithOffset() && GOTPCRelCst != 0) return; // Emit the GOT PC relative to replace the got equivalent global, i.e.: // // bar: // .long 42 // gotequiv: // .quad bar // foo: // .long gotequiv - "." + <cst> // // is replaced by the target specific equivalent to: // // bar: // .long 42 // foo: // .long bar@GOTPCREL+<gotpcrelcst> // AsmPrinter::GOTEquivUsePair Result = AP.GlobalGOTEquivs[GOTEquivSym]; const GlobalVariable *GV = Result.first; int NumUses = (int)Result.second; const GlobalValue *FinalGV = dyn_cast<GlobalValue>(GV->getOperand(0)); const MCSymbol *FinalSym = AP.getSymbol(FinalGV); *ME = AP.getObjFileLowering().getIndirectSymViaGOTPCRel( FinalSym, MV, Offset, AP.MMI, *AP.OutStreamer); // Update GOT equivalent usage information --NumUses; if (NumUses >= 0) AP.GlobalGOTEquivs[GOTEquivSym] = std::make_pair(GV, NumUses); } static void emitGlobalConstantImpl(const Constant *CV, AsmPrinter &AP, const Constant *BaseCV, uint64_t Offset) { const DataLayout *DL = AP.TM.getDataLayout(); uint64_t Size = DL->getTypeAllocSize(CV->getType()); // Globals with sub-elements such as combinations of arrays and structs // are handled recursively by emitGlobalConstantImpl. Keep track of the // constant symbol base and the current position with BaseCV and Offset. 
if (!BaseCV && CV->hasOneUse()) BaseCV = dyn_cast<Constant>(CV->user_back()); if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV)) return AP.OutStreamer->EmitZeros(Size); if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) { switch (Size) { case 1: case 2: case 4: case 8: if (AP.isVerbose()) AP.OutStreamer->GetCommentOS() << format("0x%" PRIx64 "\n", CI->getZExtValue()); AP.OutStreamer->EmitIntValue(CI->getZExtValue(), Size); return; default: emitGlobalConstantLargeInt(CI, AP); return; } } if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) return emitGlobalConstantFP(CFP, AP); if (isa<ConstantPointerNull>(CV)) { AP.OutStreamer->EmitIntValue(0, Size); return; } if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(CV)) return emitGlobalConstantDataSequential(CDS, AP); if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV)) return emitGlobalConstantArray(CVA, AP, BaseCV, Offset); if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV)) return emitGlobalConstantStruct(CVS, AP, BaseCV, Offset); if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) { // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of // vectors). if (CE->getOpcode() == Instruction::BitCast) return emitGlobalConstantImpl(CE->getOperand(0), AP); if (Size > 8) { // If the constant expression's size is greater than 64-bits, then we have // to emit the value in chunks. Try to constant fold the value and emit it // that way. Constant *New = ConstantFoldConstantExpression(CE, *DL); if (New && New != CE) return emitGlobalConstantImpl(New, AP); } } if (const ConstantVector *V = dyn_cast<ConstantVector>(CV)) return emitGlobalConstantVector(V, AP); // Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it // thread the streamer with EmitValue. const MCExpr *ME = AP.lowerConstant(CV); // Since lowerConstant already folded and got rid of all IR pointer and // integer casts, detect GOT equivalent accesses by looking into the MCExpr // directly. if (AP.getObjFileLowering().supportIndirectSymViaGOTPCRel()) handleIndirectSymViaGOTPCRel(AP, &ME, BaseCV, Offset); AP.OutStreamer->EmitValue(ME, Size); } /// EmitGlobalConstant - Print a general LLVM constant to the .s file. void AsmPrinter::EmitGlobalConstant(const Constant *CV) { uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType()); if (Size) emitGlobalConstantImpl(CV, *this); else if (MAI->hasSubsectionsViaSymbols()) { // If the global has zero size, emit a single byte so that two labels don't // look like they are at the same location. OutStreamer->EmitIntValue(0, 1); } } void AsmPrinter::EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) { // Target doesn't support this yet! llvm_unreachable("Target does not support EmitMachineConstantPoolValue"); } void AsmPrinter::printOffset(int64_t Offset, raw_ostream &OS) const { if (Offset > 0) OS << '+' << Offset; else if (Offset < 0) OS << Offset; } //===----------------------------------------------------------------------===// // Symbol Lowering Routines. 
//===----------------------------------------------------------------------===// MCSymbol *AsmPrinter::createTempSymbol(const Twine &Name) const { return OutContext.createTempSymbol(Name, true); } MCSymbol *AsmPrinter::GetBlockAddressSymbol(const BlockAddress *BA) const { return MMI->getAddrLabelSymbol(BA->getBasicBlock()); } MCSymbol *AsmPrinter::GetBlockAddressSymbol(const BasicBlock *BB) const { return MMI->getAddrLabelSymbol(BB); } /// GetCPISymbol - Return the symbol for the specified constant pool entry. MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const { const DataLayout *DL = TM.getDataLayout(); return OutContext.getOrCreateSymbol (Twine(DL->getPrivateGlobalPrefix()) + "CPI" + Twine(getFunctionNumber()) + "_" + Twine(CPID)); } /// GetJTISymbol - Return the symbol for the specified jump table entry. MCSymbol *AsmPrinter::GetJTISymbol(unsigned JTID, bool isLinkerPrivate) const { return MF->getJTISymbol(JTID, OutContext, isLinkerPrivate); } /// GetJTSetSymbol - Return the symbol for the specified jump table .set /// FIXME: privatize to AsmPrinter. MCSymbol *AsmPrinter::GetJTSetSymbol(unsigned UID, unsigned MBBID) const { const DataLayout *DL = TM.getDataLayout(); return OutContext.getOrCreateSymbol (Twine(DL->getPrivateGlobalPrefix()) + Twine(getFunctionNumber()) + "_" + Twine(UID) + "_set_" + Twine(MBBID)); } MCSymbol *AsmPrinter::getSymbolWithGlobalValueBase(const GlobalValue *GV, StringRef Suffix) const { return getObjFileLowering().getSymbolWithGlobalValueBase(GV, Suffix, *Mang, TM); } /// Return the MCSymbol for the specified ExternalSymbol. MCSymbol *AsmPrinter::GetExternalSymbolSymbol(StringRef Sym) const { SmallString<60> NameStr; Mangler::getNameWithPrefix(NameStr, Sym, *TM.getDataLayout()); return OutContext.getOrCreateSymbol(NameStr); } /// PrintParentLoopComment - Print comments about parent loops of this one. static void PrintParentLoopComment(raw_ostream &OS, const MachineLoop *Loop, unsigned FunctionNumber) { if (!Loop) return; PrintParentLoopComment(OS, Loop->getParentLoop(), FunctionNumber); OS.indent(Loop->getLoopDepth()*2) << "Parent Loop BB" << FunctionNumber << "_" << Loop->getHeader()->getNumber() << " Depth=" << Loop->getLoopDepth() << '\n'; } /// PrintChildLoopComment - Print comments about child loops within /// the loop for this basic block, with nesting. static void PrintChildLoopComment(raw_ostream &OS, const MachineLoop *Loop, unsigned FunctionNumber) { // Add child loop information for (const MachineLoop *CL : *Loop) { OS.indent(CL->getLoopDepth()*2) << "Child Loop BB" << FunctionNumber << "_" << CL->getHeader()->getNumber() << " Depth " << CL->getLoopDepth() << '\n'; PrintChildLoopComment(OS, CL, FunctionNumber); } } /// emitBasicBlockLoopComments - Pretty-print comments for basic blocks. static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB, const MachineLoopInfo *LI, const AsmPrinter &AP) { // Add loop depth information const MachineLoop *Loop = LI->getLoopFor(&MBB); if (!Loop) return; MachineBasicBlock *Header = Loop->getHeader(); assert(Header && "No header for loop"); // If this block is not a loop header, just print out what is the loop header // and return. if (Header != &MBB) { AP.OutStreamer->AddComment(" in Loop: Header=BB" + Twine(AP.getFunctionNumber())+"_" + Twine(Loop->getHeader()->getNumber())+ " Depth="+Twine(Loop->getLoopDepth())); return; } // Otherwise, it is a loop header. Print out information about child and // parent loops. 
raw_ostream &OS = AP.OutStreamer->GetCommentOS(); PrintParentLoopComment(OS, Loop->getParentLoop(), AP.getFunctionNumber()); OS << "=>"; OS.indent(Loop->getLoopDepth()*2-2); OS << "This "; if (Loop->empty()) OS << "Inner "; OS << "Loop Header: Depth=" + Twine(Loop->getLoopDepth()) << '\n'; PrintChildLoopComment(OS, Loop, AP.getFunctionNumber()); } /// EmitBasicBlockStart - This method prints the label for the specified /// MachineBasicBlock, an alignment (if present) and a comment describing /// it if appropriate. void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const { // Emit an alignment directive for this block, if needed. if (unsigned Align = MBB.getAlignment()) EmitAlignment(Align); // If the block has its address taken, emit any labels that were used to // reference the block. It is possible that there is more than one label // here, because multiple LLVM BB's may have been RAUW'd to this block after // the references were generated. if (MBB.hasAddressTaken()) { const BasicBlock *BB = MBB.getBasicBlock(); if (isVerbose()) OutStreamer->AddComment("Block address taken"); for (MCSymbol *Sym : MMI->getAddrLabelSymbolToEmit(BB)) OutStreamer->EmitLabel(Sym); } // Print some verbose block comments. if (isVerbose()) { if (const BasicBlock *BB = MBB.getBasicBlock()) if (BB->hasName()) OutStreamer->AddComment("%" + BB->getName()); emitBasicBlockLoopComments(MBB, LI, *this); } // Print the main label for the block. if (MBB.pred_empty() || isBlockOnlyReachableByFallthrough(&MBB)) { if (isVerbose()) { // NOTE: Want this comment at start of line, don't emit with AddComment. OutStreamer->emitRawComment(" BB#" + Twine(MBB.getNumber()) + ":", false); } } else { OutStreamer->EmitLabel(MBB.getSymbol()); } } void AsmPrinter::EmitVisibility(MCSymbol *Sym, unsigned Visibility, bool IsDefinition) const { MCSymbolAttr Attr = MCSA_Invalid; switch (Visibility) { default: break; case GlobalValue::HiddenVisibility: if (IsDefinition) Attr = MAI->getHiddenVisibilityAttr(); else Attr = MAI->getHiddenDeclarationVisibilityAttr(); break; case GlobalValue::ProtectedVisibility: Attr = MAI->getProtectedVisibilityAttr(); break; } if (Attr != MCSA_Invalid) OutStreamer->EmitSymbolAttribute(Sym, Attr); } /// isBlockOnlyReachableByFallthough - Return true if the basic block has /// exactly one predecessor and the control transfer mechanism between /// the predecessor and this block is a fall-through. bool AsmPrinter:: isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const { // If this is a landing pad, it isn't a fall through. If it has no preds, // then nothing falls through to it. if (MBB->isLandingPad() || MBB->pred_empty()) return false; // If there isn't exactly one predecessor, it can't be a fall through. if (MBB->pred_size() > 1) return false; // The predecessor has to be immediately before this block. MachineBasicBlock *Pred = *MBB->pred_begin(); if (!Pred->isLayoutSuccessor(MBB)) return false; // If the block is completely empty, then it definitely does fall through. if (Pred->empty()) return true; // Check the terminators in the previous blocks for (const auto &MI : Pred->terminators()) { // If it is not a simple branch, we are in a table somewhere. if (!MI.isBranch() || MI.isIndirectBranch()) return false; // If we are the operands of one of the branches, this is not a fall // through. Note that targets with delay slots will usually bundle // terminators with the delay slot instruction. 
for (ConstMIBundleOperands OP(&MI); OP.isValid(); ++OP) { if (OP->isJTI()) return false; if (OP->isMBB() && OP->getMBB() == MBB) return false; } } return true; } GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy &S) { if (!S.usesMetadata()) return nullptr; assert(!S.useStatepoints() && "statepoints do not currently support custom" " stackmap formats, please see the documentation for a description of" " the default format. If you really need a custom serialized format," " please file a bug"); gcp_map_type &GCMap = getGCMap(GCMetadataPrinters); gcp_map_type::iterator GCPI = GCMap.find(&S); if (GCPI != GCMap.end()) return GCPI->second.get(); const char *Name = S.getName().c_str(); for (GCMetadataPrinterRegistry::iterator I = GCMetadataPrinterRegistry::begin(), E = GCMetadataPrinterRegistry::end(); I != E; ++I) if (strcmp(Name, I->getName()) == 0) { std::unique_ptr<GCMetadataPrinter> GMP = I->instantiate(); GMP->S = &S; auto IterBool = GCMap.insert(std::make_pair(&S, std::move(GMP))); return IterBool.first->second.get(); } report_fatal_error("no GCMetadataPrinter registered for GC: " + Twine(Name)); } /// Pin vtable to this file. AsmPrinterHandler::~AsmPrinterHandler() {} void AsmPrinterHandler::markFunctionEnd() {}
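// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not the implementation above): the
// repeated-byte test that lets the emitGlobalConstant* routines collapse an
// initializer into a single .fill directive, restated over a plain byte
// buffer so it can be read and compiled in isolation.
#include <cstdint>
#include <vector>

// Returns the repeated byte value (0..255), or -1 if the bytes are not all
// identical.
static int detectFillByte(const std::vector<uint8_t> &Bytes) {
  if (Bytes.empty())
    return -1;
  uint8_t First = Bytes[0];
  for (uint8_t B : Bytes)
    if (B != First)
      return -1;
  return First; // int return keeps 0xFF distinct from the -1 sentinel.
}

// Example: {0xAB, 0xAB, 0xAB, 0xAB} yields 0xAB, so a streamer can emit
// ".fill 4, 1, 0xAB" instead of four separate byte directives; single-byte
// objects are still emitted directly, matching the Bytes > 1 check above.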
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/EHStreamer.h
//===-- EHStreamer.h - Exception Handling Directive Streamer ---*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing exception info into assembly files. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_EHSTREAMER_H #define LLVM_LIB_CODEGEN_ASMPRINTER_EHSTREAMER_H #include "AsmPrinterHandler.h" #include "llvm/ADT/DenseMap.h" namespace llvm { struct LandingPadInfo; class MachineModuleInfo; class MachineInstr; class MachineFunction; class AsmPrinter; class MCSymbol; class MCSymbolRefExpr; template <typename T> class SmallVectorImpl; /// Emits exception handling directives. class LLVM_LIBRARY_VISIBILITY EHStreamer : public AsmPrinterHandler { protected: /// Target of directive emission. AsmPrinter *Asm; /// Collected machine module information. MachineModuleInfo *MMI; /// How many leading type ids two landing pads have in common. static unsigned sharedTypeIDs(const LandingPadInfo *L, const LandingPadInfo *R); /// Structure holding a try-range and the associated landing pad. struct PadRange { // The index of the landing pad. unsigned PadIndex; // The index of the begin and end labels in the landing pad's label lists. unsigned RangeIndex; }; typedef DenseMap<MCSymbol *, PadRange> RangeMapType; /// Structure describing an entry in the actions table. struct ActionEntry { int ValueForTypeID; // The value to write - may not be equal to the type id. int NextAction; unsigned Previous; }; /// Structure describing an entry in the call-site table. struct CallSiteEntry { // The 'try-range' is BeginLabel .. EndLabel. MCSymbol *BeginLabel; // Null indicates the start of the function. MCSymbol *EndLabel; // Null indicates the end of the function. // LPad contains the landing pad start labels. const LandingPadInfo *LPad; // Null indicates that there is no landing pad. unsigned Action; }; /// Compute the actions table and gather the first action index for each /// landing pad site. unsigned computeActionsTable(const SmallVectorImpl<const LandingPadInfo*>&LPs, SmallVectorImpl<ActionEntry> &Actions, SmallVectorImpl<unsigned> &FirstActions); /// Return `true' if this is a call to a function marked `nounwind'. Return /// `false' otherwise. bool callToNoUnwindFunction(const MachineInstr *MI); void computePadMap(const SmallVectorImpl<const LandingPadInfo *> &LandingPads, RangeMapType &PadMap); /// Compute the call-site table. The entry for an invoke has a try-range /// containing the call, a non-zero landing pad and an appropriate action. /// The entry for an ordinary call has a try-range containing the call and /// zero for the landing pad and the action. Calls marked 'nounwind' have /// no entry and must not be contained in the try-range of any entry - they /// form gaps in the table. Entries must be ordered by try-range address. void computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites, const SmallVectorImpl<const LandingPadInfo *> &LPs, const SmallVectorImpl<unsigned> &FirstActions); /// Emit landing pads and actions. /// /// The general organization of the table is complex, but the basic concepts /// are easy. First there is a header which describes the location and /// organization of the three components that follow. /// 1. 
The landing pad site information describes the range of code covered /// by the try. In our case it's an accumulation of the ranges covered /// by the invokes in the try. There is also a reference to the landing /// pad that handles the exception once processed. Finally an index into /// the actions table. /// 2. The action table, in our case, is composed of pairs of type ids /// and next action offset. Starting with the action index from the /// landing pad site, each type Id is checked for a match to the current /// exception. If it matches then the exception and type id are passed /// on to the landing pad. Otherwise the next action is looked up. This /// chain is terminated with a next action of zero. If no type id is /// found the frame is unwound and handling continues. /// 3. Type id table contains references to all the C++ typeinfo for all /// catches in the function. This table is reverse indexed, base 1. void emitExceptionTable(); virtual void emitTypeInfos(unsigned TTypeEncoding); // Helpers for identifying what kind of clause an EH typeid or selector // corresponds to. Negative selectors are for filter clauses, the zero // selector is for cleanups, and positive selectors are for catch clauses. static bool isFilterEHSelector(int Selector) { return Selector < 0; } static bool isCleanupEHSelector(int Selector) { return Selector == 0; } static bool isCatchEHSelector(int Selector) { return Selector > 0; } public: EHStreamer(AsmPrinter *A); ~EHStreamer() override; // Unused. void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {} void beginInstruction(const MachineInstr *MI) override {} void endInstruction() override {} }; } #endif
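// ---------------------------------------------------------------------------
// Illustrative sketch of the action-chain lookup described in the
// emitExceptionTable() comment above, using a simplified in-memory model
// (SimpleAction and firstMatchingTypeID are hypothetical names; the encoded
// tables emitted by emitExceptionTable differ in representation, this only
// mirrors the lookup logic described above).
#include <vector>

struct SimpleAction {
  int TypeID;    // > 0: catch clause, < 0: filter, 0: cleanup (see helpers above)
  int NextIndex; // index of the next action in the chain, or -1 to stop
};

// Starting from the action index recorded for a landing-pad site, walk the
// chain until a type id matches the thrown exception; return 0 when nothing
// matches, in which case the frame is unwound and handling continues.
static int firstMatchingTypeID(const std::vector<SimpleAction> &Actions,
                               int Start, int ThrownTypeID) {
  for (int I = Start; I != -1; I = Actions[I].NextIndex)
    if (Actions[I].TypeID == ThrownTypeID)
      return Actions[I].TypeID;
  return 0;
}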
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DIEHash.h
//===-- llvm/CodeGen/DIEHash.h - Dwarf Hashing Framework -------*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for DWARF4 hashing of DIEs. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DIEHASH_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DIEHASH_H #include "llvm/ADT/DenseMap.h" #include "llvm/CodeGen/DIE.h" #include "llvm/Support/MD5.h" namespace llvm { class AsmPrinter; class CompileUnit; /// \brief An object containing the capability of hashing and adding hash /// attributes onto a DIE. class DIEHash { // Collection of all attributes used in hashing a particular DIE. struct DIEAttrs { DIEValue DW_AT_name; DIEValue DW_AT_accessibility; DIEValue DW_AT_address_class; DIEValue DW_AT_allocated; DIEValue DW_AT_artificial; DIEValue DW_AT_associated; DIEValue DW_AT_binary_scale; DIEValue DW_AT_bit_offset; DIEValue DW_AT_bit_size; DIEValue DW_AT_bit_stride; DIEValue DW_AT_byte_size; DIEValue DW_AT_byte_stride; DIEValue DW_AT_const_expr; DIEValue DW_AT_const_value; DIEValue DW_AT_containing_type; DIEValue DW_AT_count; DIEValue DW_AT_data_bit_offset; DIEValue DW_AT_data_location; DIEValue DW_AT_data_member_location; DIEValue DW_AT_decimal_scale; DIEValue DW_AT_decimal_sign; DIEValue DW_AT_default_value; DIEValue DW_AT_digit_count; DIEValue DW_AT_discr; DIEValue DW_AT_discr_list; DIEValue DW_AT_discr_value; DIEValue DW_AT_encoding; DIEValue DW_AT_enum_class; DIEValue DW_AT_endianity; DIEValue DW_AT_explicit; DIEValue DW_AT_is_optional; DIEValue DW_AT_location; DIEValue DW_AT_lower_bound; DIEValue DW_AT_mutable; DIEValue DW_AT_ordering; DIEValue DW_AT_picture_string; DIEValue DW_AT_prototyped; DIEValue DW_AT_small; DIEValue DW_AT_segment; DIEValue DW_AT_string_length; DIEValue DW_AT_threads_scaled; DIEValue DW_AT_upper_bound; DIEValue DW_AT_use_location; DIEValue DW_AT_use_UTF8; DIEValue DW_AT_variable_parameter; DIEValue DW_AT_virtuality; DIEValue DW_AT_visibility; DIEValue DW_AT_vtable_elem_location; DIEValue DW_AT_type; // Insert any additional ones here... }; public: DIEHash(AsmPrinter *A = nullptr) : AP(A) {} /// \brief Computes the ODR signature. uint64_t computeDIEODRSignature(const DIE &Die); /// \brief Computes the CU signature. uint64_t computeCUSignature(const DIE &Die); /// \brief Computes the type signature. uint64_t computeTypeSignature(const DIE &Die); // Helper routines to process parts of a DIE. private: /// \brief Adds the parent context of \param Die to the hash. void addParentContext(const DIE &Die); /// \brief Adds the attributes of \param Die to the hash. void addAttributes(const DIE &Die); /// \brief Computes the full DWARF4 7.27 hash of the DIE. void computeHash(const DIE &Die); // Routines that add DIEValues to the hash. public: /// \brief Adds \param Value to the hash. void update(uint8_t Value) { Hash.update(Value); } /// \brief Encodes and adds \param Value to the hash as a ULEB128. void addULEB128(uint64_t Value); /// \brief Encodes and adds \param Value to the hash as a SLEB128. void addSLEB128(int64_t Value); private: /// \brief Adds \param Str to the hash and includes a NULL byte. void addString(StringRef Str); /// \brief Collects the attributes of DIE \param Die into the \param Attrs /// structure. 
void collectAttributes(const DIE &Die, DIEAttrs &Attrs); /// \brief Hashes the attributes in \param Attrs in order. void hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag); /// \brief Hashes the data in a block like DIEValue, e.g. DW_FORM_block or /// DW_FORM_exprloc. void hashBlockData(const DIE::const_value_range &Values); /// \brief Hashes the contents pointed to in the .debug_loc section. void hashLocList(const DIELocList &LocList); /// \brief Hashes an individual attribute. void hashAttribute(DIEValue Value, dwarf::Tag Tag); /// \brief Hashes an attribute that refers to another DIE. void hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag, const DIE &Entry); /// \brief Hashes a reference to a named type in such a way that is /// independent of whether that type is described by a declaration or a /// definition. void hashShallowTypeReference(dwarf::Attribute Attribute, const DIE &Entry, StringRef Name); /// \brief Hashes a reference to a previously referenced type DIE. void hashRepeatedTypeReference(dwarf::Attribute Attribute, unsigned DieNumber); void hashNestedType(const DIE &Die, StringRef Name); private: MD5 Hash; AsmPrinter *AP; DenseMap<const DIE *, unsigned> Numbering; }; } #endif
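// ---------------------------------------------------------------------------
// Illustrative usage sketch for the interface declared above (hypothetical
// wrapper; assumes it is compiled inside the LLVM tree next to this header
// and that a fully constructed type DIE and AsmPrinter are available).
#include "DIEHash.h"
#include <cstdint>

static uint64_t hashTypeUnitSignature(llvm::AsmPrinter *AP,
                                      const llvm::DIE &TypeDie) {
  // computeTypeSignature hashes the DIE's parent context, attributes and
  // children following DWARF4 section 7.27 and returns the least significant
  // 8 bytes of the resulting MD5 digest.
  return llvm::DIEHash(AP).computeTypeSignature(TypeDie);
}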
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/LLVMBuild.txt
;===- ./lib/CodeGen/AsmPrinter/LLVMBuild.txt -------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = AsmPrinter parent = Libraries required_libraries = Analysis CodeGen Core Support Target TransformUtils ; MC MCParser - HLSL Change
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/AddressPool.h
//===-- llvm/CodeGen/AddressPool.h - Dwarf Debug Framework -----*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_ADDRESSPOOL_H #define LLVM_LIB_CODEGEN_ASMPRINTER_ADDRESSPOOL_H #include "llvm/ADT/DenseMap.h" namespace llvm { class MCSection; class MCSymbol; class AsmPrinter; // Collection of addresses for this unit and assorted labels. // A Symbol->unsigned mapping of addresses used by indirect // references. class AddressPool { struct AddressPoolEntry { unsigned Number; bool TLS; AddressPoolEntry(unsigned Number, bool TLS) : Number(Number), TLS(TLS) {} }; DenseMap<const MCSymbol *, AddressPoolEntry> Pool; /// Record whether the AddressPool has been queried for an address index since /// the last "resetUsedFlag" call. Used to implement type unit fallback - a /// type that references addresses cannot be placed in a type unit when using /// fission. bool HasBeenUsed; public: AddressPool() : HasBeenUsed(false) {} /// \brief Returns the index into the address pool with the given /// label/symbol. unsigned getIndex(const MCSymbol *Sym, bool TLS = false); void emit(AsmPrinter &Asm, MCSection *AddrSection); bool isEmpty() { return Pool.empty(); } bool hasBeenUsed() const { return HasBeenUsed; } void resetUsedFlag() { HasBeenUsed = false; } }; } #endif
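// ---------------------------------------------------------------------------
// Illustrative standalone analogue of the pool above (hypothetical class,
// keyed by std::string instead of MCSymbol* and ignoring the TLS flag):
// every distinct key is assigned the next sequential index, and asking for
// the same key again returns the index it was given the first time.
#include <string>
#include <unordered_map>

class SimpleAddressPool {
  std::unordered_map<std::string, unsigned> Pool;

public:
  unsigned getIndex(const std::string &Sym) {
    auto It = Pool.find(Sym);
    if (It != Pool.end())
      return It->second;
    unsigned Index = static_cast<unsigned>(Pool.size());
    Pool.emplace(Sym, Index);
    return Index;
  }
  bool isEmpty() const { return Pool.empty(); }
};

// Example: getIndex("x") == 0, getIndex("y") == 1, getIndex("x") == 0 again.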
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
//===-- llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h ----*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing line tables info into COFF files. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_WINCODEVIEWLINETABLES_H #define LLVM_LIB_CODEGEN_ASMPRINTER_WINCODEVIEWLINETABLES_H #include "AsmPrinterHandler.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/LexicalScopes.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugLoc.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Target/TargetLoweringObjectFile.h" namespace llvm { /// \brief Collects and handles line tables information in a CodeView format. class LLVM_LIBRARY_VISIBILITY WinCodeViewLineTables : public AsmPrinterHandler { AsmPrinter *Asm; DebugLoc PrevInstLoc; // For each function, store a vector of labels to its instructions, as well as // to the end of the function. struct FunctionInfo { SmallVector<MCSymbol *, 10> Instrs; MCSymbol *End; FunctionInfo() : End(nullptr) {} } *CurFn; typedef DenseMap<const Function *, FunctionInfo> FnDebugInfoTy; FnDebugInfoTy FnDebugInfo; // Store the functions we've visited in a vector so we can maintain a stable // order while emitting subsections. SmallVector<const Function *, 10> VisitedFunctions; // InstrInfoTy - Holds the Filename:LineNumber information for every // instruction with a unique debug location. struct InstrInfoTy { StringRef Filename; unsigned LineNumber; unsigned ColumnNumber; InstrInfoTy() : LineNumber(0), ColumnNumber(0) {} InstrInfoTy(StringRef Filename, unsigned LineNumber, unsigned ColumnNumber) : Filename(Filename), LineNumber(LineNumber), ColumnNumber(ColumnNumber) {} }; DenseMap<MCSymbol *, InstrInfoTy> InstrInfo; // FileNameRegistry - Manages filenames observed while generating debug info // by filtering out duplicates and bookkeeping the offsets in the string // table to be generated. struct FileNameRegistryTy { SmallVector<StringRef, 10> Filenames; struct PerFileInfo { size_t FilenameID, StartOffset; }; StringMap<PerFileInfo> Infos; // The offset in the string table where we'll write the next unique // filename. size_t LastOffset; FileNameRegistryTy() { clear(); } // Add Filename to the registry, if it was not observed before. 
void add(StringRef Filename) { if (Infos.count(Filename)) return; size_t OldSize = Infos.size(); Infos[Filename].FilenameID = OldSize; Infos[Filename].StartOffset = LastOffset; LastOffset += Filename.size() + 1; Filenames.push_back(Filename); } void clear() { LastOffset = 1; Infos.clear(); Filenames.clear(); } } FileNameRegistry; typedef std::map<std::pair<StringRef, StringRef>, char *> DirAndFilenameToFilepathMapTy; DirAndFilenameToFilepathMapTy DirAndFilenameToFilepathMap; StringRef getFullFilepath(const MDNode *S); void maybeRecordLocation(DebugLoc DL, const MachineFunction *MF); void clear() { assert(CurFn == nullptr); FileNameRegistry.clear(); InstrInfo.clear(); } void emitDebugInfoForFunction(const Function *GV); public: WinCodeViewLineTables(AsmPrinter *Asm); ~WinCodeViewLineTables() override { for (DirAndFilenameToFilepathMapTy::iterator I = DirAndFilenameToFilepathMap.begin(), E = DirAndFilenameToFilepathMap.end(); I != E; ++I) free(I->second); } void setSymbolSize(const llvm::MCSymbol *, uint64_t) override {} /// \brief Emit the COFF section that holds the line table information. void endModule() override; /// \brief Gather pre-function debug information. void beginFunction(const MachineFunction *MF) override; /// \brief Gather post-function debug information. void endFunction(const MachineFunction *) override; /// \brief Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override; /// \brief Process end of an instruction. void endInstruction() override {} }; } // End of namespace llvm #endif
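// ---------------------------------------------------------------------------
// Illustrative standalone mirror of FileNameRegistryTy::add() above
// (MiniFileNameRegistry is a made-up name): it shows how each new filename
// receives a sequential FilenameID and a StartOffset in the string table,
// with LastOffset advancing by the string length plus a NUL terminator.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct MiniFileNameRegistry {
  struct PerFileInfo { size_t FilenameID, StartOffset; };
  std::map<std::string, PerFileInfo> Infos;
  std::vector<std::string> Filenames;
  size_t LastOffset = 1; // matches clear() above, which starts at offset 1

  void add(const std::string &Filename) {
    if (Infos.count(Filename))
      return; // duplicates are filtered out
    size_t OldSize = Infos.size();
    Infos[Filename] = {OldSize, LastOffset};
    LastOffset += Filename.size() + 1; // string bytes plus NUL terminator
    Filenames.push_back(Filename);
  }
};

// Worked example: add("a.cpp") records {FilenameID = 0, StartOffset = 1} and
// moves LastOffset to 7; add("b.h") then records {1, 7} and moves it to 11.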
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DIEHash.cpp
//===-- llvm/CodeGen/DIEHash.cpp - Dwarf Hashing Framework ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for DWARF4 hashing of DIEs. // //===----------------------------------------------------------------------===// #include "ByteStreamer.h" #include "DIEHash.h" #include "DwarfDebug.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/DIE.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Endian.h" #include "llvm/Support/MD5.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "dwarfdebug" /// \brief Grabs the string in whichever attribute is passed in and returns /// a reference to it. static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) { // Iterate through all the attributes until we find the one we're // looking for, if we can't find it return an empty string. for (const auto &V : Die.values()) if (V.getAttribute() == Attr) return V.getDIEString().getString(); return StringRef(""); } /// \brief Adds the string in \p Str to the hash. This also hashes /// a trailing NULL with the string. void DIEHash::addString(StringRef Str) { DEBUG(dbgs() << "Adding string " << Str << " to hash.\n"); Hash.update(Str); Hash.update(makeArrayRef((uint8_t)'\0')); } // FIXME: The LEB128 routines are copied and only slightly modified out of // LEB128.h. /// \brief Adds the unsigned in \p Value to the hash encoded as a ULEB128. void DIEHash::addULEB128(uint64_t Value) { DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n"); do { uint8_t Byte = Value & 0x7f; Value >>= 7; if (Value != 0) Byte |= 0x80; // Mark this byte to show that more bytes will follow. Hash.update(Byte); } while (Value != 0); } void DIEHash::addSLEB128(int64_t Value) { DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n"); bool More; do { uint8_t Byte = Value & 0x7f; Value >>= 7; More = !((((Value == 0) && ((Byte & 0x40) == 0)) || ((Value == -1) && ((Byte & 0x40) != 0)))); if (More) Byte |= 0x80; // Mark this byte to show that more bytes will follow. Hash.update(Byte); } while (More); } /// \brief Including \p Parent adds the context of Parent to the hash.. void DIEHash::addParentContext(const DIE &Parent) { DEBUG(dbgs() << "Adding parent context to hash...\n"); // [7.27.2] For each surrounding type or namespace beginning with the // outermost such construct... SmallVector<const DIE *, 1> Parents; const DIE *Cur = &Parent; while (Cur->getParent()) { Parents.push_back(Cur); Cur = Cur->getParent(); } assert(Cur->getTag() == dwarf::DW_TAG_compile_unit || Cur->getTag() == dwarf::DW_TAG_type_unit); // Reverse iterate over our list to go from the outermost construct to the // innermost. for (SmallVectorImpl<const DIE *>::reverse_iterator I = Parents.rbegin(), E = Parents.rend(); I != E; ++I) { const DIE &Die = **I; // ... Append the letter "C" to the sequence... addULEB128('C'); // ... Followed by the DWARF tag of the construct... addULEB128(Die.getTag()); // ... Then the name, taken from the DW_AT_name attribute. StringRef Name = getDIEStringAttr(Die, dwarf::DW_AT_name); DEBUG(dbgs() << "... 
adding context: " << Name << "\n"); if (!Name.empty()) addString(Name); } } // Collect all of the attributes for a particular DIE in single structure. void DIEHash::collectAttributes(const DIE &Die, DIEAttrs &Attrs) { #define COLLECT_ATTR(NAME) \ case dwarf::NAME: \ Attrs.NAME = V; \ break for (const auto &V : Die.values()) { DEBUG(dbgs() << "Attribute: " << dwarf::AttributeString(V.getAttribute()) << " added.\n"); switch (V.getAttribute()) { COLLECT_ATTR(DW_AT_name); COLLECT_ATTR(DW_AT_accessibility); COLLECT_ATTR(DW_AT_address_class); COLLECT_ATTR(DW_AT_allocated); COLLECT_ATTR(DW_AT_artificial); COLLECT_ATTR(DW_AT_associated); COLLECT_ATTR(DW_AT_binary_scale); COLLECT_ATTR(DW_AT_bit_offset); COLLECT_ATTR(DW_AT_bit_size); COLLECT_ATTR(DW_AT_bit_stride); COLLECT_ATTR(DW_AT_byte_size); COLLECT_ATTR(DW_AT_byte_stride); COLLECT_ATTR(DW_AT_const_expr); COLLECT_ATTR(DW_AT_const_value); COLLECT_ATTR(DW_AT_containing_type); COLLECT_ATTR(DW_AT_count); COLLECT_ATTR(DW_AT_data_bit_offset); COLLECT_ATTR(DW_AT_data_location); COLLECT_ATTR(DW_AT_data_member_location); COLLECT_ATTR(DW_AT_decimal_scale); COLLECT_ATTR(DW_AT_decimal_sign); COLLECT_ATTR(DW_AT_default_value); COLLECT_ATTR(DW_AT_digit_count); COLLECT_ATTR(DW_AT_discr); COLLECT_ATTR(DW_AT_discr_list); COLLECT_ATTR(DW_AT_discr_value); COLLECT_ATTR(DW_AT_encoding); COLLECT_ATTR(DW_AT_enum_class); COLLECT_ATTR(DW_AT_endianity); COLLECT_ATTR(DW_AT_explicit); COLLECT_ATTR(DW_AT_is_optional); COLLECT_ATTR(DW_AT_location); COLLECT_ATTR(DW_AT_lower_bound); COLLECT_ATTR(DW_AT_mutable); COLLECT_ATTR(DW_AT_ordering); COLLECT_ATTR(DW_AT_picture_string); COLLECT_ATTR(DW_AT_prototyped); COLLECT_ATTR(DW_AT_small); COLLECT_ATTR(DW_AT_segment); COLLECT_ATTR(DW_AT_string_length); COLLECT_ATTR(DW_AT_threads_scaled); COLLECT_ATTR(DW_AT_upper_bound); COLLECT_ATTR(DW_AT_use_location); COLLECT_ATTR(DW_AT_use_UTF8); COLLECT_ATTR(DW_AT_variable_parameter); COLLECT_ATTR(DW_AT_virtuality); COLLECT_ATTR(DW_AT_visibility); COLLECT_ATTR(DW_AT_vtable_elem_location); COLLECT_ATTR(DW_AT_type); default: break; } } } void DIEHash::hashShallowTypeReference(dwarf::Attribute Attribute, const DIE &Entry, StringRef Name) { // append the letter 'N' addULEB128('N'); // the DWARF attribute code (DW_AT_type or DW_AT_friend), addULEB128(Attribute); // the context of the tag, if (const DIE *Parent = Entry.getParent()) addParentContext(*Parent); // the letter 'E', addULEB128('E'); // and the name of the type. addString(Name); // Currently DW_TAG_friends are not used by Clang, but if they do become so, // here's the relevant spec text to implement: // // For DW_TAG_friend, if the referenced entry is the DW_TAG_subprogram, // the context is omitted and the name to be used is the ABI-specific name // of the subprogram (e.g., the mangled linker name). } void DIEHash::hashRepeatedTypeReference(dwarf::Attribute Attribute, unsigned DieNumber) { // a) If T is in the list of [previously hashed types], use the letter // 'R' as the marker addULEB128('R'); addULEB128(Attribute); // and use the unsigned LEB128 encoding of [the index of T in the // list] as the attribute value; addULEB128(DieNumber); } void DIEHash::hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag, const DIE &Entry) { assert(Tag != dwarf::DW_TAG_friend && "No current LLVM clients emit friend " "tags. 
Add support here when there's " "a use case"); // Step 5 // If the tag in Step 3 is one of [the below tags] if ((Tag == dwarf::DW_TAG_pointer_type || Tag == dwarf::DW_TAG_reference_type || Tag == dwarf::DW_TAG_rvalue_reference_type || Tag == dwarf::DW_TAG_ptr_to_member_type) && // and the referenced type (via the [below attributes]) // FIXME: This seems overly restrictive, and causes hash mismatches // there's a decl/def difference in the containing type of a // ptr_to_member_type, but it's what DWARF says, for some reason. Attribute == dwarf::DW_AT_type) { // ... has a DW_AT_name attribute, StringRef Name = getDIEStringAttr(Entry, dwarf::DW_AT_name); if (!Name.empty()) { hashShallowTypeReference(Attribute, Entry, Name); return; } } unsigned &DieNumber = Numbering[&Entry]; if (DieNumber) { hashRepeatedTypeReference(Attribute, DieNumber); return; } // otherwise, b) use the letter 'T' as the marker, ... addULEB128('T'); addULEB128(Attribute); // ... process the type T recursively by performing Steps 2 through 7, and // use the result as the attribute value. DieNumber = Numbering.size(); computeHash(Entry); } // Hash all of the values in a block like set of values. This assumes that // all of the data is going to be added as integers. void DIEHash::hashBlockData(const DIE::const_value_range &Values) { for (const auto &V : Values) Hash.update((uint64_t)V.getDIEInteger().getValue()); } // Hash the contents of a loclistptr class. void DIEHash::hashLocList(const DIELocList &LocList) { HashingByteStreamer Streamer(*this); DwarfDebug &DD = *AP->getDwarfDebug(); const DebugLocStream &Locs = DD.getDebugLocs(); for (const auto &Entry : Locs.getEntries(Locs.getList(LocList.getValue()))) DD.emitDebugLocEntry(Streamer, Entry); } // Hash an individual attribute \param Attr based on the type of attribute and // the form. void DIEHash::hashAttribute(DIEValue Value, dwarf::Tag Tag) { dwarf::Attribute Attribute = Value.getAttribute(); // Other attribute values use the letter 'A' as the marker, and the value // consists of the form code (encoded as an unsigned LEB128 value) followed by // the encoding of the value according to the form code. To ensure // reproducibility of the signature, the set of forms used in the signature // computation is limited to the following: DW_FORM_sdata, DW_FORM_flag, // DW_FORM_string, and DW_FORM_block. switch (Value.getType()) { case DIEValue::isNone: llvm_unreachable("Expected valid DIEValue"); // 7.27 Step 3 // ... An attribute that refers to another type entry T is processed as // follows: case DIEValue::isEntry: hashDIEEntry(Attribute, Tag, Value.getDIEEntry().getEntry()); break; case DIEValue::isInteger: { addULEB128('A'); addULEB128(Attribute); switch (Value.getForm()) { case dwarf::DW_FORM_data1: case dwarf::DW_FORM_data2: case dwarf::DW_FORM_data4: case dwarf::DW_FORM_data8: case dwarf::DW_FORM_udata: case dwarf::DW_FORM_sdata: addULEB128(dwarf::DW_FORM_sdata); addSLEB128((int64_t)Value.getDIEInteger().getValue()); break; // DW_FORM_flag_present is just flag with a value of one. We still give it a // value so just use the value. 
case dwarf::DW_FORM_flag_present: case dwarf::DW_FORM_flag: addULEB128(dwarf::DW_FORM_flag); addULEB128((int64_t)Value.getDIEInteger().getValue()); break; default: llvm_unreachable("Unknown integer form!"); } break; } case DIEValue::isString: addULEB128('A'); addULEB128(Attribute); addULEB128(dwarf::DW_FORM_string); addString(Value.getDIEString().getString()); break; case DIEValue::isBlock: case DIEValue::isLoc: case DIEValue::isLocList: addULEB128('A'); addULEB128(Attribute); addULEB128(dwarf::DW_FORM_block); if (Value.getType() == DIEValue::isBlock) { addULEB128(Value.getDIEBlock().ComputeSize(AP)); hashBlockData(Value.getDIEBlock().values()); } else if (Value.getType() == DIEValue::isLoc) { addULEB128(Value.getDIELoc().ComputeSize(AP)); hashBlockData(Value.getDIELoc().values()); } else { // We could add the block length, but that would take // a bit of work and not add a lot of uniqueness // to the hash in some way we could test. hashLocList(Value.getDIELocList()); } break; // FIXME: It's uncertain whether or not we should handle this at the moment. case DIEValue::isExpr: case DIEValue::isLabel: case DIEValue::isDelta: case DIEValue::isTypeSignature: llvm_unreachable("Add support for additional value types."); } } // Go through the attributes from \param Attrs in the order specified in 7.27.4 // and hash them. void DIEHash::hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag) { #define ADD_ATTR(ATTR) \ { \ if (ATTR) \ hashAttribute(ATTR, Tag); \ } ADD_ATTR(Attrs.DW_AT_name); ADD_ATTR(Attrs.DW_AT_accessibility); ADD_ATTR(Attrs.DW_AT_address_class); ADD_ATTR(Attrs.DW_AT_allocated); ADD_ATTR(Attrs.DW_AT_artificial); ADD_ATTR(Attrs.DW_AT_associated); ADD_ATTR(Attrs.DW_AT_binary_scale); ADD_ATTR(Attrs.DW_AT_bit_offset); ADD_ATTR(Attrs.DW_AT_bit_size); ADD_ATTR(Attrs.DW_AT_bit_stride); ADD_ATTR(Attrs.DW_AT_byte_size); ADD_ATTR(Attrs.DW_AT_byte_stride); ADD_ATTR(Attrs.DW_AT_const_expr); ADD_ATTR(Attrs.DW_AT_const_value); ADD_ATTR(Attrs.DW_AT_containing_type); ADD_ATTR(Attrs.DW_AT_count); ADD_ATTR(Attrs.DW_AT_data_bit_offset); ADD_ATTR(Attrs.DW_AT_data_location); ADD_ATTR(Attrs.DW_AT_data_member_location); ADD_ATTR(Attrs.DW_AT_decimal_scale); ADD_ATTR(Attrs.DW_AT_decimal_sign); ADD_ATTR(Attrs.DW_AT_default_value); ADD_ATTR(Attrs.DW_AT_digit_count); ADD_ATTR(Attrs.DW_AT_discr); ADD_ATTR(Attrs.DW_AT_discr_list); ADD_ATTR(Attrs.DW_AT_discr_value); ADD_ATTR(Attrs.DW_AT_encoding); ADD_ATTR(Attrs.DW_AT_enum_class); ADD_ATTR(Attrs.DW_AT_endianity); ADD_ATTR(Attrs.DW_AT_explicit); ADD_ATTR(Attrs.DW_AT_is_optional); ADD_ATTR(Attrs.DW_AT_location); ADD_ATTR(Attrs.DW_AT_lower_bound); ADD_ATTR(Attrs.DW_AT_mutable); ADD_ATTR(Attrs.DW_AT_ordering); ADD_ATTR(Attrs.DW_AT_picture_string); ADD_ATTR(Attrs.DW_AT_prototyped); ADD_ATTR(Attrs.DW_AT_small); ADD_ATTR(Attrs.DW_AT_segment); ADD_ATTR(Attrs.DW_AT_string_length); ADD_ATTR(Attrs.DW_AT_threads_scaled); ADD_ATTR(Attrs.DW_AT_upper_bound); ADD_ATTR(Attrs.DW_AT_use_location); ADD_ATTR(Attrs.DW_AT_use_UTF8); ADD_ATTR(Attrs.DW_AT_variable_parameter); ADD_ATTR(Attrs.DW_AT_virtuality); ADD_ATTR(Attrs.DW_AT_visibility); ADD_ATTR(Attrs.DW_AT_vtable_elem_location); ADD_ATTR(Attrs.DW_AT_type); // FIXME: Add the extended attributes. } // Add all of the attributes for \param Die to the hash. void DIEHash::addAttributes(const DIE &Die) { DIEAttrs Attrs = {}; collectAttributes(Die, Attrs); hashAttributes(Attrs, Die.getTag()); } void DIEHash::hashNestedType(const DIE &Die, StringRef Name) { // 7.27 Step 7 // ... 
append the letter 'S', addULEB128('S'); // the tag of C, addULEB128(Die.getTag()); // and the name. addString(Name); } // Compute the hash of a DIE. This is based on the type signature computation // given in section 7.27 of the DWARF4 standard. It is the md5 hash of a // flattened description of the DIE. void DIEHash::computeHash(const DIE &Die) { // Append the letter 'D', followed by the DWARF tag of the DIE. addULEB128('D'); addULEB128(Die.getTag()); // Add each of the attributes of the DIE. addAttributes(Die); // Then hash each of the children of the DIE. for (auto &C : Die.children()) { // 7.27 Step 7 // If C is a nested type entry or a member function entry, ... if (isType(C.getTag()) || C.getTag() == dwarf::DW_TAG_subprogram) { StringRef Name = getDIEStringAttr(C, dwarf::DW_AT_name); // ... and has a DW_AT_name attribute if (!Name.empty()) { hashNestedType(C, Name); continue; } } computeHash(C); } // Following the last (or if there are no children), append a zero byte. Hash.update(makeArrayRef((uint8_t)'\0')); } /// This is based on the type signature computation given in section 7.27 of the /// DWARF4 standard. It is the md5 hash of a flattened description of the DIE /// with the exception that we are hashing only the context and the name of the /// type. uint64_t DIEHash::computeDIEODRSignature(const DIE &Die) { // Add the contexts to the hash. We won't be computing the ODR hash for // function local types so it's safe to use the generic context hashing // algorithm here. // FIXME: If we figure out how to account for linkage in some way we could // actually do this with a slight modification to the parent hash algorithm. if (const DIE *Parent = Die.getParent()) addParentContext(*Parent); // Add the current DIE information. // Add the DWARF tag of the DIE. addULEB128(Die.getTag()); // Add the name of the type to the hash. addString(getDIEStringAttr(Die, dwarf::DW_AT_name)); // Now get the result. MD5::MD5Result Result; Hash.final(Result); // ... take the least significant 8 bytes and return those. Our MD5 // implementation always returns its results in little endian, swap bytes // appropriately. return support::endian::read64le(Result + 8); } /// This is based on the type signature computation given in section 7.27 of the /// DWARF4 standard. It is an md5 hash of the flattened description of the DIE /// with the inclusion of the full CU and all top level CU entities. // TODO: Initialize the type chain at 0 instead of 1 for CU signatures. uint64_t DIEHash::computeCUSignature(const DIE &Die) { Numbering.clear(); Numbering[&Die] = 1; // Hash the DIE. computeHash(Die); // Now return the result. MD5::MD5Result Result; Hash.final(Result); // ... take the least significant 8 bytes and return those. Our MD5 // implementation always returns its results in little endian, swap bytes // appropriately. return support::endian::read64le(Result + 8); } /// This is based on the type signature computation given in section 7.27 of the /// DWARF4 standard. It is an md5 hash of the flattened description of the DIE /// with the inclusion of additional forms not specifically called out in the /// standard. uint64_t DIEHash::computeTypeSignature(const DIE &Die) { Numbering.clear(); Numbering[&Die] = 1; if (const DIE *Parent = Die.getParent()) addParentContext(*Parent); // Hash the DIE. computeHash(Die); // Now return the result. MD5::MD5Result Result; Hash.final(Result); // ... take the least significant 8 bytes and return those. 
Our MD5 // implementation always returns its results in little endian, swap bytes // appropriately. return support::endian::read64le(Result + 8); }
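The signature computations above all end the same way: hash the flattened DIE description with MD5 and fold the low half of the 16-byte digest into a 64-bit value via support::endian::read64le(Result + 8). A minimal standalone sketch of that final step, with illustrative names only (not the LLVM helpers themselves):

#include <cstdint>

// Read 8 bytes as a little-endian 64-bit integer; Bytes[0] is the
// least-significant byte, matching support::endian::read64le.
static uint64_t read64le(const uint8_t *Bytes) {
  uint64_t V = 0;
  for (int I = 7; I >= 0; --I)
    V = (V << 8) | Bytes[I];
  return V;
}

// Given a 16-byte MD5 digest, the signature is its least significant
// 8 bytes, i.e. the second half of the digest read little-endian.
static uint64_t signatureFromDigest(const uint8_t Digest[16]) {
  return read64le(Digest + 8);
}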
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
//===-- CodeGen/AsmPrinter/DwarfException.cpp - Dwarf Exception Impl ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing DWARF exception info into asm files. // //===----------------------------------------------------------------------===// #include "DwarfException.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Twine.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MachineLocation.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetRegisterInfo.h" using namespace llvm; DwarfCFIExceptionBase::DwarfCFIExceptionBase(AsmPrinter *A) : EHStreamer(A), shouldEmitCFI(false) {} void DwarfCFIExceptionBase::markFunctionEnd() { if (shouldEmitCFI) Asm->OutStreamer->EmitCFIEndProc(); if (MMI->getLandingPads().empty()) return; // Map all labels and get rid of any dead landing pads. MMI->TidyLandingPads(); } DwarfCFIException::DwarfCFIException(AsmPrinter *A) : DwarfCFIExceptionBase(A), shouldEmitPersonality(false), shouldEmitLSDA(false), shouldEmitMoves(false), moveTypeModule(AsmPrinter::CFI_M_None) {} DwarfCFIException::~DwarfCFIException() {} /// endModule - Emit all exception information that should come after the /// content. void DwarfCFIException::endModule() { if (moveTypeModule == AsmPrinter::CFI_M_Debug) Asm->OutStreamer->EmitCFISections(false, true); // SjLj uses this pass and it doesn't need this info. if (!Asm->MAI->usesCFIForEH()) return; const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); unsigned PerEncoding = TLOF.getPersonalityEncoding(); if ((PerEncoding & 0x80) != dwarf::DW_EH_PE_indirect) return; // Emit references to all used personality functions const std::vector<const Function*> &Personalities = MMI->getPersonalities(); for (size_t i = 0, e = Personalities.size(); i != e; ++i) { if (!Personalities[i]) continue; MCSymbol *Sym = Asm->getSymbol(Personalities[i]); TLOF.emitPersonalityValue(*Asm->OutStreamer, Asm->TM, Sym); } } void DwarfCFIException::beginFunction(const MachineFunction *MF) { shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false; const Function *F = MF->getFunction(); // If any landing pads survive, we need an EH table. bool hasLandingPads = !MMI->getLandingPads().empty(); // See if we need frame move info. 
AsmPrinter::CFIMoveType MoveType = Asm->needsCFIMoves(); if (MoveType == AsmPrinter::CFI_M_EH || (MoveType == AsmPrinter::CFI_M_Debug && moveTypeModule == AsmPrinter::CFI_M_None)) moveTypeModule = MoveType; shouldEmitMoves = MoveType != AsmPrinter::CFI_M_None; const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); unsigned PerEncoding = TLOF.getPersonalityEncoding(); const Function *Per = nullptr; if (F->hasPersonalityFn()) Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts()); assert(!MMI->getPersonality() || Per == MMI->getPersonality()); // Emit a personality function even when there are no landing pads bool forceEmitPersonality = // ...if a personality function is explicitly specified F->hasPersonalityFn() && // ... and it's not known to be a noop in the absence of invokes !isNoOpWithoutInvoke(classifyEHPersonality(Per)) && // ... and we're not explicitly asked not to emit it F->needsUnwindTableEntry(); shouldEmitPersonality = (forceEmitPersonality || (hasLandingPads && PerEncoding != dwarf::DW_EH_PE_omit)) && Per; unsigned LSDAEncoding = TLOF.getLSDAEncoding(); shouldEmitLSDA = shouldEmitPersonality && LSDAEncoding != dwarf::DW_EH_PE_omit; shouldEmitCFI = shouldEmitPersonality || shouldEmitMoves; if (!shouldEmitCFI) return; Asm->OutStreamer->EmitCFIStartProc(/*IsSimple=*/false); // Indicate personality routine, if any. if (!shouldEmitPersonality) return; // If we are forced to emit this personality, make sure to record // it because it might not appear in any landingpad if (forceEmitPersonality) MMI->addPersonality(Per); const MCSymbol *Sym = TLOF.getCFIPersonalitySymbol(Per, *Asm->Mang, Asm->TM, MMI); Asm->OutStreamer->EmitCFIPersonality(Sym, PerEncoding); // Provide LSDA information. if (!shouldEmitLSDA) return; Asm->OutStreamer->EmitCFILsda(Asm->getCurExceptionSym(), LSDAEncoding); } /// endFunction - Gather and emit post-function exception information. /// void DwarfCFIException::endFunction(const MachineFunction *) { if (!shouldEmitPersonality) return; emitExceptionTable(); }
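endModule() above only emits personality references when the personality encoding is indirect, which it detects by testing the high bit of the encoding byte. A small illustrative sketch of how such a DWARF EH pointer-encoding byte decomposes; the constants are assumed to match the GCC/LSB exception-handling ABI values mirrored in llvm/Support/Dwarf.h:

#include <cstdint>
#include <cstdio>

enum : uint8_t {
  DW_EH_PE_omit     = 0xff, // no value present at all
  DW_EH_PE_absptr   = 0x00, // low nibble: how the value is encoded
  DW_EH_PE_uleb128  = 0x01,
  DW_EH_PE_sdata4   = 0x0b,
  DW_EH_PE_pcrel    = 0x10, // high nibble: how the value is applied
  DW_EH_PE_indirect = 0x80  // value is the address of the real value
};

int main() {
  uint8_t Enc = DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4;
  bool Indirect = (Enc & 0x80) != 0;   // the bit endModule() checks
  uint8_t Format = Enc & 0x0f;         // e.g. DW_EH_PE_sdata4
  uint8_t Application = Enc & 0x70;    // e.g. DW_EH_PE_pcrel
  std::printf("indirect=%d format=0x%x application=0x%x\n",
              int(Indirect), unsigned(Format), unsigned(Application));
  return 0;
}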
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/ByteStreamer.h
//===-- llvm/CodeGen/ByteStreamer.h - ByteStreamer class --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains a class that can take bytes that would normally be // streamed via the AsmPrinter. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_BYTESTREAMER_H #define LLVM_LIB_CODEGEN_ASMPRINTER_BYTESTREAMER_H #include "DIEHash.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Support/LEB128.h" #include <string> namespace llvm { class ByteStreamer { public: virtual ~ByteStreamer() {} // For now we're just handling the calls we need for dwarf emission/hashing. virtual void EmitInt8(uint8_t Byte, const Twine &Comment = "") = 0; virtual void EmitSLEB128(uint64_t DWord, const Twine &Comment = "") = 0; virtual void EmitULEB128(uint64_t DWord, const Twine &Comment = "") = 0; }; class APByteStreamer : public ByteStreamer { private: AsmPrinter &AP; public: APByteStreamer(AsmPrinter &Asm) : AP(Asm) {} void EmitInt8(uint8_t Byte, const Twine &Comment) override { AP.OutStreamer->AddComment(Comment); AP.EmitInt8(Byte); } void EmitSLEB128(uint64_t DWord, const Twine &Comment) override { AP.OutStreamer->AddComment(Comment); AP.EmitSLEB128(DWord); } void EmitULEB128(uint64_t DWord, const Twine &Comment) override { AP.OutStreamer->AddComment(Comment); AP.EmitULEB128(DWord); } }; class HashingByteStreamer : public ByteStreamer { private: DIEHash &Hash; public: HashingByteStreamer(DIEHash &H) : Hash(H) {} void EmitInt8(uint8_t Byte, const Twine &Comment) override { Hash.update(Byte); } void EmitSLEB128(uint64_t DWord, const Twine &Comment) override { Hash.addSLEB128(DWord); } void EmitULEB128(uint64_t DWord, const Twine &Comment) override { Hash.addULEB128(DWord); } }; class BufferByteStreamer : public ByteStreamer { private: SmallVectorImpl<char> &Buffer; SmallVectorImpl<std::string> &Comments; /// \brief Only verbose textual output needs comments. This will be set to /// true for that case, and false otherwise. If false, comments passed in to /// the emit methods will be ignored. bool GenerateComments; public: BufferByteStreamer(SmallVectorImpl<char> &Buffer, SmallVectorImpl<std::string> &Comments, bool GenerateComments) : Buffer(Buffer), Comments(Comments), GenerateComments(GenerateComments) {} void EmitInt8(uint8_t Byte, const Twine &Comment) override { Buffer.push_back(Byte); if (GenerateComments) Comments.push_back(Comment.str()); } void EmitSLEB128(uint64_t DWord, const Twine &Comment) override { raw_svector_ostream OSE(Buffer); encodeSLEB128(DWord, OSE); if (GenerateComments) Comments.push_back(Comment.str()); } void EmitULEB128(uint64_t DWord, const Twine &Comment) override { raw_svector_ostream OSE(Buffer); encodeULEB128(DWord, OSE); if (GenerateComments) Comments.push_back(Comment.str()); } }; } #endif
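BufferByteStreamer above defers to encodeULEB128/encodeSLEB128 from llvm/Support/LEB128.h. As a reference point, here is a minimal, self-contained sketch of the standard LEB128 encodings those helpers implement; the function names here are illustrative only:

#include <cstdint>
#include <vector>

static void emitULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;              // high bit set: more bytes follow
    Out.push_back(Byte);
  } while (Value != 0);
}

static void emitSLEB128(int64_t Value, std::vector<uint8_t> &Out) {
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;                 // arithmetic shift preserves the sign
    // Stop once the remaining bits are pure sign extension and the sign
    // bit of the emitted byte already agrees with them.
    More = !((Value == 0 && (Byte & 0x40) == 0) ||
             (Value == -1 && (Byte & 0x40) != 0));
    if (More)
      Byte |= 0x80;
    Out.push_back(Byte);
  } while (More);
}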
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
//===-- llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp --*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing line tables info into COFF files. // //===----------------------------------------------------------------------===// #include "WinCodeViewLineTables.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/COFF.h" namespace llvm { StringRef WinCodeViewLineTables::getFullFilepath(const MDNode *S) { assert(S); assert((isa<DICompileUnit>(S) || isa<DIFile>(S) || isa<DISubprogram>(S) || isa<DILexicalBlockBase>(S)) && "Unexpected scope info"); auto *Scope = cast<DIScope>(S); StringRef Dir = Scope->getDirectory(), Filename = Scope->getFilename(); char *&Result = DirAndFilenameToFilepathMap[std::make_pair(Dir, Filename)]; if (Result) return Result; // Clang emits directory and relative filename info into the IR, but CodeView // operates on full paths. We could change Clang to emit full paths too, but // that would increase the IR size and probably not needed for other users. // For now, just concatenate and canonicalize the path here. std::string Filepath; if (Filename.find(':') == 1) Filepath = Filename; else Filepath = (Dir + "\\" + Filename).str(); // Canonicalize the path. We have to do it textually because we may no longer // have access the file in the filesystem. // First, replace all slashes with backslashes. std::replace(Filepath.begin(), Filepath.end(), '/', '\\'); // Remove all "\.\" with "\". size_t Cursor = 0; while ((Cursor = Filepath.find("\\.\\", Cursor)) != std::string::npos) Filepath.erase(Cursor, 2); // Replace all "\XXX\..\" with "\". Don't try too hard though as the original // path should be well-formatted, e.g. start with a drive letter, etc. Cursor = 0; while ((Cursor = Filepath.find("\\..\\", Cursor)) != std::string::npos) { // Something's wrong if the path starts with "\..\", abort. if (Cursor == 0) break; size_t PrevSlash = Filepath.rfind('\\', Cursor - 1); if (PrevSlash == std::string::npos) // Something's wrong, abort. break; Filepath.erase(PrevSlash, Cursor + 3 - PrevSlash); // The next ".." might be following the one we've just erased. Cursor = PrevSlash; } // Remove all duplicate backslashes. Cursor = 0; while ((Cursor = Filepath.find("\\\\", Cursor)) != std::string::npos) Filepath.erase(Cursor, 1); Result = strdup(Filepath.c_str()); return StringRef(Result); } void WinCodeViewLineTables::maybeRecordLocation(DebugLoc DL, const MachineFunction *MF) { const MDNode *Scope = DL.getScope(); if (!Scope) return; StringRef Filename = getFullFilepath(Scope); // Skip this instruction if it has the same file:line as the previous one. 
assert(CurFn); if (!CurFn->Instrs.empty()) { const InstrInfoTy &LastInstr = InstrInfo[CurFn->Instrs.back()]; if (LastInstr.Filename == Filename && LastInstr.LineNumber == DL.getLine()) return; } FileNameRegistry.add(Filename); MCSymbol *MCL = Asm->MMI->getContext().createTempSymbol(); Asm->OutStreamer->EmitLabel(MCL); CurFn->Instrs.push_back(MCL); InstrInfo[MCL] = InstrInfoTy(Filename, DL.getLine(), DL.getCol()); } WinCodeViewLineTables::WinCodeViewLineTables(AsmPrinter *AP) : Asm(nullptr), CurFn(nullptr) { MachineModuleInfo *MMI = AP->MMI; // If module doesn't have named metadata anchors or COFF debug section // is not available, skip any debug info related stuff. if (!MMI->getModule()->getNamedMetadata("llvm.dbg.cu") || !AP->getObjFileLowering().getCOFFDebugSymbolsSection()) return; // Tell MMI that we have debug info. MMI->setDebugInfoAvailability(true); Asm = AP; } void WinCodeViewLineTables::endModule() { if (FnDebugInfo.empty()) return; assert(Asm != nullptr); Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getCOFFDebugSymbolsSection()); Asm->EmitInt32(COFF::DEBUG_SECTION_MAGIC); // The COFF .debug$S section consists of several subsections, each starting // with a 4-byte control code (e.g. 0xF1, 0xF2, etc) and then a 4-byte length // of the payload followed by the payload itself. The subsections are 4-byte // aligned. // Emit per-function debug information. This code is extracted into a // separate function for readability. for (size_t I = 0, E = VisitedFunctions.size(); I != E; ++I) emitDebugInfoForFunction(VisitedFunctions[I]); // This subsection holds a file index to offset in string table table. Asm->OutStreamer->AddComment("File index to string table offset subsection"); Asm->EmitInt32(COFF::DEBUG_INDEX_SUBSECTION); size_t NumFilenames = FileNameRegistry.Infos.size(); Asm->EmitInt32(8 * NumFilenames); for (size_t I = 0, E = FileNameRegistry.Filenames.size(); I != E; ++I) { StringRef Filename = FileNameRegistry.Filenames[I]; // For each unique filename, just write its offset in the string table. Asm->EmitInt32(FileNameRegistry.Infos[Filename].StartOffset); // The function name offset is not followed by any additional data. Asm->EmitInt32(0); } // This subsection holds the string table. Asm->OutStreamer->AddComment("String table"); Asm->EmitInt32(COFF::DEBUG_STRING_TABLE_SUBSECTION); Asm->EmitInt32(FileNameRegistry.LastOffset); // The payload starts with a null character. Asm->EmitInt8(0); for (size_t I = 0, E = FileNameRegistry.Filenames.size(); I != E; ++I) { // Just emit unique filenames one by one, separated by a null character. Asm->OutStreamer->EmitBytes(FileNameRegistry.Filenames[I]); Asm->EmitInt8(0); } // No more subsections. Fill with zeros to align the end of the section by 4. Asm->OutStreamer->EmitFill((-FileNameRegistry.LastOffset) % 4, 0); clear(); } static void EmitLabelDiff(MCStreamer &Streamer, const MCSymbol *From, const MCSymbol *To, unsigned int Size = 4) { MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; MCContext &Context = Streamer.getContext(); const MCExpr *FromRef = MCSymbolRefExpr::create(From, Variant, Context), *ToRef = MCSymbolRefExpr::create(To, Variant, Context); const MCExpr *AddrDelta = MCBinaryExpr::create(MCBinaryExpr::Sub, ToRef, FromRef, Context); Streamer.EmitValue(AddrDelta, Size); } void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) { // For each function there is a separate subsection // which holds the PC to file:line table. 
const MCSymbol *Fn = Asm->getSymbol(GV); assert(Fn); const FunctionInfo &FI = FnDebugInfo[GV]; if (FI.Instrs.empty()) return; assert(FI.End && "Don't know where the function ends?"); StringRef GVName = GV->getName(); StringRef FuncName; if (auto *SP = getDISubprogram(GV)) FuncName = SP->getDisplayName(); // FIXME Clang currently sets DisplayName to "bar" for a C++ // "namespace_foo::bar" function, see PR21528. Luckily, dbghelp.dll is trying // to demangle display names anyways, so let's just put a mangled name into // the symbols subsection until Clang gives us what we need. if (GVName.startswith("\01?")) FuncName = GVName.substr(1); // Emit a symbol subsection, required by VS2012+ to find function boundaries. MCSymbol *SymbolsBegin = Asm->MMI->getContext().createTempSymbol(), *SymbolsEnd = Asm->MMI->getContext().createTempSymbol(); Asm->OutStreamer->AddComment("Symbol subsection for " + Twine(FuncName)); Asm->EmitInt32(COFF::DEBUG_SYMBOL_SUBSECTION); EmitLabelDiff(*Asm->OutStreamer, SymbolsBegin, SymbolsEnd); Asm->OutStreamer->EmitLabel(SymbolsBegin); { MCSymbol *ProcSegmentBegin = Asm->MMI->getContext().createTempSymbol(), *ProcSegmentEnd = Asm->MMI->getContext().createTempSymbol(); EmitLabelDiff(*Asm->OutStreamer, ProcSegmentBegin, ProcSegmentEnd, 2); Asm->OutStreamer->EmitLabel(ProcSegmentBegin); Asm->EmitInt16(COFF::DEBUG_SYMBOL_TYPE_PROC_START); // Some bytes of this segment don't seem to be required for basic debugging, // so just fill them with zeroes. Asm->OutStreamer->EmitFill(12, 0); // This is the important bit that tells the debugger where the function // code is located and what's its size: EmitLabelDiff(*Asm->OutStreamer, Fn, FI.End); Asm->OutStreamer->EmitFill(12, 0); Asm->OutStreamer->EmitCOFFSecRel32(Fn); Asm->OutStreamer->EmitCOFFSectionIndex(Fn); Asm->EmitInt8(0); // Emit the function display name as a null-terminated string. Asm->OutStreamer->EmitBytes(FuncName); Asm->EmitInt8(0); Asm->OutStreamer->EmitLabel(ProcSegmentEnd); // We're done with this function. Asm->EmitInt16(0x0002); Asm->EmitInt16(COFF::DEBUG_SYMBOL_TYPE_PROC_END); } Asm->OutStreamer->EmitLabel(SymbolsEnd); // Every subsection must be aligned to a 4-byte boundary. Asm->OutStreamer->EmitFill((-FuncName.size()) % 4, 0); // PCs/Instructions are grouped into segments sharing the same filename. // Pre-calculate the lengths (in instructions) of these segments and store // them in a map for convenience. Each index in the map is the sequential // number of the respective instruction that starts a new segment. DenseMap<size_t, size_t> FilenameSegmentLengths; size_t LastSegmentEnd = 0; StringRef PrevFilename = InstrInfo[FI.Instrs[0]].Filename; for (size_t J = 1, F = FI.Instrs.size(); J != F; ++J) { if (PrevFilename == InstrInfo[FI.Instrs[J]].Filename) continue; FilenameSegmentLengths[LastSegmentEnd] = J - LastSegmentEnd; LastSegmentEnd = J; PrevFilename = InstrInfo[FI.Instrs[J]].Filename; } FilenameSegmentLengths[LastSegmentEnd] = FI.Instrs.size() - LastSegmentEnd; // Emit a line table subsection, requred to do PC-to-file:line lookup. Asm->OutStreamer->AddComment("Line table subsection for " + Twine(FuncName)); Asm->EmitInt32(COFF::DEBUG_LINE_TABLE_SUBSECTION); MCSymbol *LineTableBegin = Asm->MMI->getContext().createTempSymbol(), *LineTableEnd = Asm->MMI->getContext().createTempSymbol(); EmitLabelDiff(*Asm->OutStreamer, LineTableBegin, LineTableEnd); Asm->OutStreamer->EmitLabel(LineTableBegin); // Identify the function this subsection is for. 
Asm->OutStreamer->EmitCOFFSecRel32(Fn); Asm->OutStreamer->EmitCOFFSectionIndex(Fn); // Insert flags after a 16-bit section index. Asm->EmitInt16(COFF::DEBUG_LINE_TABLES_HAVE_COLUMN_RECORDS); // Length of the function's code, in bytes. EmitLabelDiff(*Asm->OutStreamer, Fn, FI.End); // PC-to-linenumber lookup table: MCSymbol *FileSegmentEnd = nullptr; // The start of the last segment: size_t LastSegmentStart = 0; auto FinishPreviousChunk = [&] { if (!FileSegmentEnd) return; for (size_t ColSegI = LastSegmentStart, ColSegEnd = ColSegI + FilenameSegmentLengths[LastSegmentStart]; ColSegI != ColSegEnd; ++ColSegI) { unsigned ColumnNumber = InstrInfo[FI.Instrs[ColSegI]].ColumnNumber; Asm->EmitInt16(ColumnNumber); // Start column Asm->EmitInt16(ColumnNumber); // End column } Asm->OutStreamer->EmitLabel(FileSegmentEnd); }; for (size_t J = 0, F = FI.Instrs.size(); J != F; ++J) { MCSymbol *Instr = FI.Instrs[J]; assert(InstrInfo.count(Instr)); if (FilenameSegmentLengths.count(J)) { // We came to a beginning of a new filename segment. FinishPreviousChunk(); StringRef CurFilename = InstrInfo[FI.Instrs[J]].Filename; assert(FileNameRegistry.Infos.count(CurFilename)); size_t IndexInStringTable = FileNameRegistry.Infos[CurFilename].FilenameID; // Each segment starts with the offset of the filename // in the string table. Asm->OutStreamer->AddComment( "Segment for file '" + Twine(CurFilename) + "' begins"); MCSymbol *FileSegmentBegin = Asm->MMI->getContext().createTempSymbol(); Asm->OutStreamer->EmitLabel(FileSegmentBegin); Asm->EmitInt32(8 * IndexInStringTable); // Number of PC records in the lookup table. size_t SegmentLength = FilenameSegmentLengths[J]; Asm->EmitInt32(SegmentLength); // Full size of the segment for this filename, including the prev two // records. FileSegmentEnd = Asm->MMI->getContext().createTempSymbol(); EmitLabelDiff(*Asm->OutStreamer, FileSegmentBegin, FileSegmentEnd); LastSegmentStart = J; } // The first PC with the given linenumber and the linenumber itself. EmitLabelDiff(*Asm->OutStreamer, Fn, Instr); Asm->EmitInt32(InstrInfo[Instr].LineNumber); } FinishPreviousChunk(); Asm->OutStreamer->EmitLabel(LineTableEnd); } void WinCodeViewLineTables::beginFunction(const MachineFunction *MF) { assert(!CurFn && "Can't process two functions at once!"); if (!Asm || !Asm->MMI->hasDebugInfo()) return; const Function *GV = MF->getFunction(); assert(FnDebugInfo.count(GV) == false); VisitedFunctions.push_back(GV); CurFn = &FnDebugInfo[GV]; // Find the end of the function prolog. // FIXME: is there a simpler a way to do this? Can we just search // for the first instruction of the function, not the last of the prolog? DebugLoc PrologEndLoc; bool EmptyPrologue = true; for (const auto &MBB : *MF) { if (PrologEndLoc) break; for (const auto &MI : MBB) { if (MI.isDebugValue()) continue; // First known non-DBG_VALUE and non-frame setup location marks // the beginning of the function body. // FIXME: do we need the first subcondition? if (!MI.getFlag(MachineInstr::FrameSetup) && MI.getDebugLoc()) { PrologEndLoc = MI.getDebugLoc(); break; } EmptyPrologue = false; } } // Record beginning of function if we have a non-empty prologue. if (PrologEndLoc && !EmptyPrologue) { DebugLoc FnStartDL = PrologEndLoc.getFnDebugLoc(); maybeRecordLocation(FnStartDL, MF); } } void WinCodeViewLineTables::endFunction(const MachineFunction *MF) { if (!Asm || !CurFn) // We haven't created any debug info for this function. 
return; const Function *GV = MF->getFunction(); assert(FnDebugInfo.count(GV)); assert(CurFn == &FnDebugInfo[GV]); if (CurFn->Instrs.empty()) { FnDebugInfo.erase(GV); VisitedFunctions.pop_back(); } else { CurFn->End = Asm->getFunctionEnd(); } CurFn = nullptr; } void WinCodeViewLineTables::beginInstruction(const MachineInstr *MI) { // Ignore DBG_VALUE locations and function prologue. if (!Asm || MI->isDebugValue() || MI->getFlag(MachineInstr::FrameSetup)) return; DebugLoc DL = MI->getDebugLoc(); if (DL == PrevInstLoc || !DL) return; maybeRecordLocation(DL, Asm->MF); } }
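getFullFilepath() above canonicalizes paths purely textually because the source files may no longer be reachable on disk. A simplified standalone sketch of the same transformation, with a usage example (illustration only, not the CodeView writer itself):

#include <algorithm>
#include <cstdio>
#include <string>

static std::string canonicalize(std::string Path) {
  // Normalize to backslashes.
  std::replace(Path.begin(), Path.end(), '/', '\\');

  // Turn every "\.\" into "\".
  size_t Cursor = 0;
  while ((Cursor = Path.find("\\.\\", Cursor)) != std::string::npos)
    Path.erase(Cursor, 2);

  // Fold "\XXX\..\" into "\", bailing out on malformed input.
  Cursor = 0;
  while ((Cursor = Path.find("\\..\\", Cursor)) != std::string::npos) {
    if (Cursor == 0)
      break;
    size_t PrevSlash = Path.rfind('\\', Cursor - 1);
    if (PrevSlash == std::string::npos)
      break;
    Path.erase(PrevSlash, Cursor + 3 - PrevSlash);
    Cursor = PrevSlash;
  }

  // Collapse duplicate backslashes.
  Cursor = 0;
  while ((Cursor = Path.find("\\\\", Cursor)) != std::string::npos)
    Path.erase(Cursor, 1);
  return Path;
}

int main() {
  std::printf("%s\n",
              canonicalize("C:/src/.\\lib\\CodeGen\\..\\IR\\Module.cpp").c_str());
  // Prints: C:\src\lib\IR\Module.cpp
  return 0;
}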
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
//==-- llvm/CodeGen/DwarfAccelTable.h - Dwarf Accelerator Tables -*- C++ -*-==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf accelerator tables. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFACCELTABLE_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFACCELTABLE_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringMap.h" #include "llvm/CodeGen/DIE.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/FormattedStream.h" #include <vector> // The dwarf accelerator tables are an indirect hash table optimized // for null lookup rather than access to known data. They are output into // an on-disk format that looks like this: // // .-------------. // | HEADER | // |-------------| // | BUCKETS | // |-------------| // | HASHES | // |-------------| // | OFFSETS | // |-------------| // | DATA | // `-------------' // // where the header contains a magic number, version, type of hash function, // the number of buckets, total number of hashes, and room for a special // struct of data and the length of that struct. // // The buckets contain an index (e.g. 6) into the hashes array. The hashes // section contains all of the 32-bit hash values in contiguous memory, and // the offsets contain the offset into the data area for the particular // hash. // // For a lookup example, we could hash a function name and take it modulo the // number of buckets giving us our bucket. From there we take the bucket value // as an index into the hashes table and look at each successive hash as long // as the hash value is still the same modulo result (bucket value) as earlier. // If we have a match we look at that same entry in the offsets table and // grab the offset in the data for our final match. namespace llvm { class AsmPrinter; class DwarfDebug; class DwarfAccelTable { static uint32_t HashDJB(StringRef Str) { uint32_t h = 5381; for (unsigned i = 0, e = Str.size(); i != e; ++i) h = ((h << 5) + h) + Str[i]; return h; } // Helper function to compute the number of buckets needed based on // the number of unique hashes. void ComputeBucketCount(void); struct TableHeader { uint32_t magic; // 'HASH' magic value to allow endian detection uint16_t version; // Version number. uint16_t hash_function; // The hash function enumeration that was used. uint32_t bucket_count; // The number of buckets in this hash table. uint32_t hashes_count; // The total number of unique hash values // and hash data offsets in this table. uint32_t header_data_len; // The bytes to skip to get to the hash // indexes (buckets) for correct alignment. // Also written to disk is the implementation specific header data. 
static const uint32_t MagicHash = 0x48415348; TableHeader(uint32_t data_len) : magic(MagicHash), version(1), hash_function(dwarf::DW_hash_function_djb), bucket_count(0), hashes_count(0), header_data_len(data_len) {} #ifndef NDEBUG void print(raw_ostream &O) { O << "Magic: " << format("0x%x", magic) << "\n" << "Version: " << version << "\n" << "Hash Function: " << hash_function << "\n" << "Bucket Count: " << bucket_count << "\n" << "Header Data Length: " << header_data_len << "\n"; } void dump() { print(dbgs()); } #endif }; public: // The HeaderData describes the form of each set of data. In general this // is as a list of atoms (atom_count) where each atom contains a type // (AtomType type) of data, and an encoding form (form). In the case of // data that is referenced via DW_FORM_ref_* the die_offset_base is // used to describe the offset for all forms in the list of atoms. // This also serves as a public interface of sorts. // When written to disk this will have the form: // // uint32_t die_offset_base // uint32_t atom_count // atom_count Atoms // Make these public so that they can be used as a general interface to // the class. struct Atom { uint16_t type; // enum AtomType uint16_t form; // DWARF DW_FORM_ defines LLVM_CONSTEXPR Atom(uint16_t type, uint16_t form) : type(type), form(form) {} #ifndef NDEBUG void print(raw_ostream &O) { O << "Type: " << dwarf::AtomTypeString(type) << "\n" << "Form: " << dwarf::FormEncodingString(form) << "\n"; } void dump() { print(dbgs()); } #endif }; private: struct TableHeaderData { uint32_t die_offset_base; SmallVector<Atom, 3> Atoms; TableHeaderData(ArrayRef<Atom> AtomList, uint32_t offset = 0) : die_offset_base(offset), Atoms(AtomList.begin(), AtomList.end()) {} #ifndef NDEBUG void print(raw_ostream &O) { O << "die_offset_base: " << die_offset_base << "\n"; for (size_t i = 0; i < Atoms.size(); i++) Atoms[i].print(O); } void dump() { print(dbgs()); } #endif }; // The data itself consists of a str_offset, a count of the DIEs in the // hash and the offsets to the DIEs themselves. // On disk each data section is ended with a 0 KeyType as the end of the // hash chain. 
// On output this looks like: // uint32_t str_offset // uint32_t hash_data_count // HashData[hash_data_count] public: struct HashDataContents { const DIE *Die; // Offsets char Flags; // Specific flags to output HashDataContents(const DIE *D, char Flags) : Die(D), Flags(Flags) {} #ifndef NDEBUG void print(raw_ostream &O) const { O << " Offset: " << Die->getOffset() << "\n"; O << " Tag: " << dwarf::TagString(Die->getTag()) << "\n"; O << " Flags: " << Flags << "\n"; } #endif }; private: // String Data struct DataArray { DwarfStringPoolEntryRef Name; std::vector<HashDataContents *> Values; }; friend struct HashData; struct HashData { StringRef Str; uint32_t HashValue; MCSymbol *Sym; DwarfAccelTable::DataArray &Data; // offsets HashData(StringRef S, DwarfAccelTable::DataArray &Data) : Str(S), Data(Data) { HashValue = DwarfAccelTable::HashDJB(S); } #ifndef NDEBUG void print(raw_ostream &O) { O << "Name: " << Str << "\n"; O << " Hash Value: " << format("0x%x", HashValue) << "\n"; O << " Symbol: "; if (Sym) O << *Sym; else O << "<none>"; O << "\n"; for (HashDataContents *C : Data.Values) { O << " Offset: " << C->Die->getOffset() << "\n"; O << " Tag: " << dwarf::TagString(C->Die->getTag()) << "\n"; O << " Flags: " << C->Flags << "\n"; } } void dump() { print(dbgs()); } #endif }; DwarfAccelTable(const DwarfAccelTable &) = delete; void operator=(const DwarfAccelTable &) = delete; // Internal Functions void EmitHeader(AsmPrinter *); void EmitBuckets(AsmPrinter *); void EmitHashes(AsmPrinter *); void emitOffsets(AsmPrinter *, const MCSymbol *); void EmitData(AsmPrinter *, DwarfDebug *D); // Allocator for HashData and HashDataContents. BumpPtrAllocator Allocator; // Output Variables TableHeader Header; TableHeaderData HeaderData; std::vector<HashData *> Data; typedef StringMap<DataArray, BumpPtrAllocator &> StringEntries; StringEntries Entries; // Buckets/Hashes/Offsets typedef std::vector<HashData *> HashList; typedef std::vector<HashList> BucketList; BucketList Buckets; HashList Hashes; // Public Implementation public: DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom>); void AddName(DwarfStringPoolEntryRef Name, const DIE *Die, char Flags = 0); void FinalizeTable(AsmPrinter *, StringRef); void emit(AsmPrinter *, const MCSymbol *, DwarfDebug *); #ifndef NDEBUG void print(raw_ostream &O); void dump() { print(dbgs()); } #endif }; } #endif
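The header comment above describes the lookup side of the accelerator table: DJB-hash the name, take it modulo the bucket count to pick a bucket, then walk the contiguous hash array while the hashes still map to that bucket, and on a match read the parallel offsets entry. A minimal sketch of that scheme using a simplified in-memory layout (the real on-disk table also carries header data and richer per-hash payloads):

#include <cstdint>
#include <string>
#include <vector>

static uint32_t hashDJB(const std::string &Str) {
  uint32_t H = 5381;
  for (unsigned char C : Str)
    H = ((H << 5) + H) + C;       // H * 33 + C
  return H;
}

struct AccelTable {
  uint32_t BucketCount;
  std::vector<uint32_t> BucketIndex; // index of the first hash for each bucket
  std::vector<uint32_t> Hashes;      // all 32-bit hashes, grouped by bucket
  std::vector<uint32_t> Offsets;     // parallel offsets into the DATA area
};

// Returns the data offset for Name, or -1 if it is not in the table.
static int64_t lookup(const AccelTable &T, const std::string &Name) {
  uint32_t Hash = hashDJB(Name);
  uint32_t Bucket = Hash % T.BucketCount;
  for (uint32_t I = T.BucketIndex[Bucket];
       I < T.Hashes.size() && T.Hashes[I] % T.BucketCount == Bucket; ++I)
    if (T.Hashes[I] == Hash)
      return T.Offsets[I];
  return -1;
}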
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
//===-- llvm/CodeGen/DwarfUnit.cpp - Dwarf Type and Compile Units ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for constructing a dwarf compile unit. // //===----------------------------------------------------------------------===// #include "DwarfUnit.h" #include "DwarfAccelTable.h" #include "DwarfCompileUnit.h" #include "DwarfDebug.h" #include "DwarfExpression.h" #include "llvm/ADT/APFloat.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Mangler.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; #define DEBUG_TYPE "dwarfdebug" static cl::opt<bool> GenerateDwarfTypeUnits("generate-type-units", cl::Hidden, cl::desc("Generate DWARF4 type units."), cl::init(false)); DIEDwarfExpression::DIEDwarfExpression(const AsmPrinter &AP, DwarfUnit &DU, DIELoc &DIE) : DwarfExpression(*AP.MF->getSubtarget().getRegisterInfo(), AP.getDwarfDebug()->getDwarfVersion()), AP(AP), DU(DU), DIE(DIE) {} void DIEDwarfExpression::EmitOp(uint8_t Op, const char* Comment) { DU.addUInt(DIE, dwarf::DW_FORM_data1, Op); } void DIEDwarfExpression::EmitSigned(int64_t Value) { DU.addSInt(DIE, dwarf::DW_FORM_sdata, Value); } void DIEDwarfExpression::EmitUnsigned(uint64_t Value) { DU.addUInt(DIE, dwarf::DW_FORM_udata, Value); } bool DIEDwarfExpression::isFrameRegister(unsigned MachineReg) { return MachineReg == TRI.getFrameRegister(*AP.MF); } DwarfUnit::DwarfUnit(unsigned UID, dwarf::Tag UnitTag, const DICompileUnit *Node, AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU) : UniqueID(UID), CUNode(Node), UnitDie(*DIE::get(DIEValueAllocator, UnitTag)), DebugInfoOffset(0), Asm(A), DD(DW), DU(DWU), IndexTyDie(nullptr), Section(nullptr) { assert(UnitTag == dwarf::DW_TAG_compile_unit || UnitTag == dwarf::DW_TAG_type_unit); } DwarfTypeUnit::DwarfTypeUnit(unsigned UID, DwarfCompileUnit &CU, AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU, MCDwarfDwoLineTable *SplitLineTable) : DwarfUnit(UID, dwarf::DW_TAG_type_unit, CU.getCUNode(), A, DW, DWU), CU(CU), SplitLineTable(SplitLineTable) { if (SplitLineTable) addSectionOffset(UnitDie, dwarf::DW_AT_stmt_list, 0); } DwarfUnit::~DwarfUnit() { for (unsigned j = 0, M = DIEBlocks.size(); j < M; ++j) DIEBlocks[j]->~DIEBlock(); for (unsigned j = 0, M = DIELocs.size(); j < M; ++j) DIELocs[j]->~DIELoc(); } int64_t DwarfUnit::getDefaultLowerBound() const { switch (getLanguage()) { default: break; case dwarf::DW_LANG_C89: case dwarf::DW_LANG_C99: case dwarf::DW_LANG_C: case dwarf::DW_LANG_C_plus_plus: case dwarf::DW_LANG_ObjC: case dwarf::DW_LANG_ObjC_plus_plus: return 0; case dwarf::DW_LANG_Fortran77: case dwarf::DW_LANG_Fortran90: case dwarf::DW_LANG_Fortran95: return 1; // The languages below have valid values only if the DWARF version >= 4. 
case dwarf::DW_LANG_Java: case dwarf::DW_LANG_Python: case dwarf::DW_LANG_UPC: case dwarf::DW_LANG_D: if (dwarf::DWARF_VERSION >= 4) return 0; break; case dwarf::DW_LANG_Ada83: case dwarf::DW_LANG_Ada95: case dwarf::DW_LANG_Cobol74: case dwarf::DW_LANG_Cobol85: case dwarf::DW_LANG_Modula2: case dwarf::DW_LANG_Pascal83: case dwarf::DW_LANG_PLI: if (dwarf::DWARF_VERSION >= 4) return 1; break; // The languages below have valid values only if the DWARF version >= 5. case dwarf::DW_LANG_OpenCL: case dwarf::DW_LANG_Go: case dwarf::DW_LANG_Haskell: case dwarf::DW_LANG_C_plus_plus_03: case dwarf::DW_LANG_C_plus_plus_11: case dwarf::DW_LANG_OCaml: case dwarf::DW_LANG_Rust: case dwarf::DW_LANG_C11: case dwarf::DW_LANG_Swift: case dwarf::DW_LANG_Dylan: case dwarf::DW_LANG_C_plus_plus_14: if (dwarf::DWARF_VERSION >= 5) return 0; break; case dwarf::DW_LANG_Modula3: case dwarf::DW_LANG_Julia: case dwarf::DW_LANG_Fortran03: case dwarf::DW_LANG_Fortran08: if (dwarf::DWARF_VERSION >= 5) return 1; break; } return -1; } /// Check whether the DIE for this MDNode can be shared across CUs. static bool isShareableAcrossCUs(const DINode *D) { // When the MDNode can be part of the type system, the DIE can be shared // across CUs. // Combining type units and cross-CU DIE sharing is lower value (since // cross-CU DIE sharing is used in LTO and removes type redundancy at that // level already) but may be implementable for some value in projects // building multiple independent libraries with LTO and then linking those // together. return (isa<DIType>(D) || (isa<DISubprogram>(D) && !cast<DISubprogram>(D)->isDefinition())) && !GenerateDwarfTypeUnits; } DIE *DwarfUnit::getDIE(const DINode *D) const { if (isShareableAcrossCUs(D)) return DU->getDIE(D); return MDNodeToDieMap.lookup(D); } void DwarfUnit::insertDIE(const DINode *Desc, DIE *D) { if (isShareableAcrossCUs(Desc)) { DU->insertDIE(Desc, D); return; } MDNodeToDieMap.insert(std::make_pair(Desc, D)); } void DwarfUnit::addFlag(DIE &Die, dwarf::Attribute Attribute) { if (DD->getDwarfVersion() >= 4) Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_flag_present, DIEInteger(1)); else Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_flag, DIEInteger(1)); } void DwarfUnit::addUInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form, uint64_t Integer) { if (!Form) Form = DIEInteger::BestForm(false, Integer); Die.addValue(DIEValueAllocator, Attribute, *Form, DIEInteger(Integer)); } void DwarfUnit::addUInt(DIE &Block, dwarf::Form Form, uint64_t Integer) { addUInt(Block, (dwarf::Attribute)0, Form, Integer); } void DwarfUnit::addSInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form, int64_t Integer) { if (!Form) Form = DIEInteger::BestForm(true, Integer); Die.addValue(DIEValueAllocator, Attribute, *Form, DIEInteger(Integer)); } void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer) { addSInt(Die, (dwarf::Attribute)0, Form, Integer); } void DwarfUnit::addString(DIE &Die, dwarf::Attribute Attribute, StringRef String) { Die.addValue(DIEValueAllocator, Attribute, isDwoUnit() ? 
dwarf::DW_FORM_GNU_str_index : dwarf::DW_FORM_strp, DIEString(DU->getStringPool().getEntry(*Asm, String))); } DIE::value_iterator DwarfUnit::addLabel(DIE &Die, dwarf::Attribute Attribute, dwarf::Form Form, const MCSymbol *Label) { return Die.addValue(DIEValueAllocator, Attribute, Form, DIELabel(Label)); } void DwarfUnit::addLabel(DIELoc &Die, dwarf::Form Form, const MCSymbol *Label) { addLabel(Die, (dwarf::Attribute)0, Form, Label); } void DwarfUnit::addSectionOffset(DIE &Die, dwarf::Attribute Attribute, uint64_t Integer) { if (DD->getDwarfVersion() >= 4) addUInt(Die, Attribute, dwarf::DW_FORM_sec_offset, Integer); else addUInt(Die, Attribute, dwarf::DW_FORM_data4, Integer); } unsigned DwarfTypeUnit::getOrCreateSourceID(StringRef FileName, StringRef DirName) { return SplitLineTable ? SplitLineTable->getFile(DirName, FileName) : getCU().getOrCreateSourceID(FileName, DirName); } void DwarfUnit::addOpAddress(DIELoc &Die, const MCSymbol *Sym) { if (!DD->useSplitDwarf()) { addUInt(Die, dwarf::DW_FORM_data1, dwarf::DW_OP_addr); addLabel(Die, dwarf::DW_FORM_udata, Sym); } else { addUInt(Die, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_addr_index); addUInt(Die, dwarf::DW_FORM_GNU_addr_index, DD->getAddressPool().getIndex(Sym)); } } void DwarfUnit::addLabelDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi, const MCSymbol *Lo) { Die.addValue(DIEValueAllocator, Attribute, dwarf::DW_FORM_data4, new (DIEValueAllocator) DIEDelta(Hi, Lo)); } void DwarfUnit::addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIE &Entry) { addDIEEntry(Die, Attribute, DIEEntry(Entry)); } void DwarfUnit::addDIETypeSignature(DIE &Die, const DwarfTypeUnit &Type) { // Flag the type unit reference as a declaration so that if it contains // members (implicit special members, static data member definitions, member // declarations for definitions in this CU, etc) consumers don't get confused // and think this is a full definition. addFlag(Die, dwarf::DW_AT_declaration); Die.addValue(DIEValueAllocator, dwarf::DW_AT_signature, dwarf::DW_FORM_ref_sig8, DIETypeSignature(Type)); } void DwarfUnit::addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIEEntry Entry) { const DIE *DieCU = Die.getUnitOrNull(); const DIE *EntryCU = Entry.getEntry().getUnitOrNull(); if (!DieCU) // We assume that Die belongs to this CU, if it is not linked to any CU yet. DieCU = &getUnitDie(); if (!EntryCU) EntryCU = &getUnitDie(); Die.addValue(DIEValueAllocator, Attribute, EntryCU == DieCU ? dwarf::DW_FORM_ref4 : dwarf::DW_FORM_ref_addr, Entry); } DIE &DwarfUnit::createAndAddDIE(unsigned Tag, DIE &Parent, const DINode *N) { assert(Tag != dwarf::DW_TAG_auto_variable && Tag != dwarf::DW_TAG_arg_variable); DIE &Die = Parent.addChild(DIE::get(DIEValueAllocator, (dwarf::Tag)Tag)); if (N) insertDIE(N, &Die); return Die; } void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Loc) { Loc->ComputeSize(Asm); DIELocs.push_back(Loc); // Memoize so we can call the destructor later on. Die.addValue(DIEValueAllocator, Attribute, Loc->BestForm(DD->getDwarfVersion()), Loc); } void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute, DIEBlock *Block) { Block->ComputeSize(Asm); DIEBlocks.push_back(Block); // Memoize so we can call the destructor later on. 
Die.addValue(DIEValueAllocator, Attribute, Block->BestForm(), Block); } void DwarfUnit::addSourceLine(DIE &Die, unsigned Line, StringRef File, StringRef Directory) { if (Line == 0) return; unsigned FileID = getOrCreateSourceID(File, Directory); assert(FileID && "Invalid file id"); addUInt(Die, dwarf::DW_AT_decl_file, None, FileID); addUInt(Die, dwarf::DW_AT_decl_line, None, Line); } void DwarfUnit::addSourceLine(DIE &Die, const DILocalVariable *V) { assert(V); addSourceLine(Die, V->getLine(), V->getScope()->getFilename(), V->getScope()->getDirectory()); } void DwarfUnit::addSourceLine(DIE &Die, const DIGlobalVariable *G) { assert(G); addSourceLine(Die, G->getLine(), G->getFilename(), G->getDirectory()); } void DwarfUnit::addSourceLine(DIE &Die, const DISubprogram *SP) { assert(SP); addSourceLine(Die, SP->getLine(), SP->getFilename(), SP->getDirectory()); } void DwarfUnit::addSourceLine(DIE &Die, const DIType *Ty) { assert(Ty); addSourceLine(Die, Ty->getLine(), Ty->getFilename(), Ty->getDirectory()); } void DwarfUnit::addSourceLine(DIE &Die, const DIObjCProperty *Ty) { assert(Ty); addSourceLine(Die, Ty->getLine(), Ty->getFilename(), Ty->getDirectory()); } void DwarfUnit::addSourceLine(DIE &Die, const DINamespace *NS) { addSourceLine(Die, NS->getLine(), NS->getFilename(), NS->getDirectory()); } bool DwarfUnit::addRegisterOpPiece(DIELoc &TheDie, unsigned Reg, unsigned SizeInBits, unsigned OffsetInBits) { DIEDwarfExpression Expr(*Asm, *this, TheDie); Expr.AddMachineRegPiece(Reg, SizeInBits, OffsetInBits); return true; } bool DwarfUnit::addRegisterOffset(DIELoc &TheDie, unsigned Reg, int64_t Offset) { DIEDwarfExpression Expr(*Asm, *this, TheDie); return Expr.AddMachineRegIndirect(Reg, Offset); } /* Byref variables, in Blocks, are declared by the programmer as "SomeType VarName;", but the compiler creates a __Block_byref_x_VarName struct, and gives the variable VarName either the struct, or a pointer to the struct, as its type. This is necessary for various behind-the-scenes things the compiler needs to do with by-reference variables in Blocks. However, as far as the original *programmer* is concerned, the variable should still have type 'SomeType', as originally declared. The function getBlockByrefType dives into the __Block_byref_x_VarName struct to find the original type of the variable, which is then assigned to the variable's Debug Information Entry as its real type. So far, so good. However now the debugger will expect the variable VarName to have the type SomeType. So we need the location attribute for the variable to be an expression that explains to the debugger how to navigate through the pointers and struct to find the actual variable of type SomeType. The following function does just that. We start by getting the "normal" location for the variable. This will be the location of either the struct __Block_byref_x_VarName or the pointer to the struct __Block_byref_x_VarName. The struct will look something like: struct __Block_byref_x_VarName { ... <various fields> struct __Block_byref_x_VarName *forwarding; ... <various other fields> SomeType VarName; ... <maybe more fields> }; If we are given the struct directly (as our starting point) we need to tell the debugger to: 1). Add the offset of the forwarding field. 2). Follow that pointer to get the real __Block_byref_x_VarName struct to use (the real one may have been copied onto the heap). 3). Add the offset for the field VarName, to find the actual variable. 
If we started with a pointer to the struct, then we need to dereference that pointer first, before the other steps. Translating this into DWARF ops, we will need to append the following to the current location description for the variable: DW_OP_deref -- optional, if we start with a pointer DW_OP_plus_uconst <forward_fld_offset> DW_OP_deref DW_OP_plus_uconst <varName_fld_offset> That is what this function does. */ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die, dwarf::Attribute Attribute, const MachineLocation &Location) { const DIType *Ty = DV.getType(); const DIType *TmpTy = Ty; uint16_t Tag = Ty->getTag(); bool isPointer = false; StringRef varName = DV.getName(); if (Tag == dwarf::DW_TAG_pointer_type) { auto *DTy = cast<DIDerivedType>(Ty); TmpTy = resolve(DTy->getBaseType()); isPointer = true; } // Find the __forwarding field and the variable field in the __Block_byref // struct. DINodeArray Fields = cast<DICompositeTypeBase>(TmpTy)->getElements(); const DIDerivedType *varField = nullptr; const DIDerivedType *forwardingField = nullptr; for (unsigned i = 0, N = Fields.size(); i < N; ++i) { auto *DT = cast<DIDerivedType>(Fields[i]); StringRef fieldName = DT->getName(); if (fieldName == "__forwarding") forwardingField = DT; else if (fieldName == varName) varField = DT; } // Get the offsets for the forwarding field and the variable field. unsigned forwardingFieldOffset = forwardingField->getOffsetInBits() >> 3; unsigned varFieldOffset = varField->getOffsetInBits() >> 2; // Decode the original location, and use that as the start of the byref // variable's location. DIELoc *Loc = new (DIEValueAllocator) DIELoc; bool validReg; if (Location.isReg()) validReg = addRegisterOpPiece(*Loc, Location.getReg()); else validReg = addRegisterOffset(*Loc, Location.getReg(), Location.getOffset()); if (!validReg) return; // If we started with a pointer to the __Block_byref... struct, then // the first thing we need to do is dereference the pointer (DW_OP_deref). if (isPointer) addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref); // Next add the offset for the '__forwarding' field: // DW_OP_plus_uconst ForwardingFieldOffset. Note there's no point in // adding the offset if it's 0. if (forwardingFieldOffset > 0) { addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst); addUInt(*Loc, dwarf::DW_FORM_udata, forwardingFieldOffset); } // Now dereference the __forwarding field to get to the real __Block_byref // struct: DW_OP_deref. addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref); // Now that we've got the real __Block_byref... struct, add the offset // for the variable's field to get to the location of the actual variable: // DW_OP_plus_uconst varFieldOffset. Again, don't add if it's 0. if (varFieldOffset > 0) { addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst); addUInt(*Loc, dwarf::DW_FORM_udata, varFieldOffset); } // Now attach the location information to the DIE. addBlock(Die, Attribute, Loc); } /// Return true if type encoding is unsigned. static bool isUnsignedDIType(DwarfDebug *DD, const DIType *Ty) { if (auto *DTy = dyn_cast<DIDerivedTypeBase>(Ty)) { dwarf::Tag T = (dwarf::Tag)Ty->getTag(); // Encode pointer constants as unsigned bytes. This is used at least for // null pointer constant emission. // (Pieces of) aggregate types that get hacked apart by SROA may also be // represented by a constant. Encode them as unsigned bytes. 
// FIXME: reference and rvalue_reference /probably/ shouldn't be allowed // here, but accept them for now due to a bug in SROA producing bogus // dbg.values. if (T == dwarf::DW_TAG_array_type || T == dwarf::DW_TAG_class_type || T == dwarf::DW_TAG_pointer_type || T == dwarf::DW_TAG_ptr_to_member_type || T == dwarf::DW_TAG_reference_type || T == dwarf::DW_TAG_rvalue_reference_type || T == dwarf::DW_TAG_structure_type || T == dwarf::DW_TAG_union_type) return true; assert(T == dwarf::DW_TAG_typedef || T == dwarf::DW_TAG_const_type || T == dwarf::DW_TAG_volatile_type || T == dwarf::DW_TAG_restrict_type || T == dwarf::DW_TAG_enumeration_type); if (DITypeRef Deriv = DTy->getBaseType()) return isUnsignedDIType(DD, DD->resolve(Deriv)); // FIXME: Enums without a fixed underlying type have unknown signedness // here, leading to incorrectly emitted constants. assert(DTy->getTag() == dwarf::DW_TAG_enumeration_type); return false; } auto *BTy = cast<DIBasicType>(Ty); unsigned Encoding = BTy->getEncoding(); assert((Encoding == dwarf::DW_ATE_unsigned || Encoding == dwarf::DW_ATE_unsigned_char || Encoding == dwarf::DW_ATE_signed || Encoding == dwarf::DW_ATE_signed_char || Encoding == dwarf::DW_ATE_float || Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean || (Ty->getTag() == dwarf::DW_TAG_unspecified_type && Ty->getName() == "decltype(nullptr)")) && "Unsupported encoding"); return Encoding == dwarf::DW_ATE_unsigned || Encoding == dwarf::DW_ATE_unsigned_char || Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean || Ty->getTag() == dwarf::DW_TAG_unspecified_type; } /// If this type is derived from a base type then return base type size. static uint64_t getBaseTypeSize(DwarfDebug *DD, const DIDerivedType *Ty) { unsigned Tag = Ty->getTag(); if (Tag != dwarf::DW_TAG_member && Tag != dwarf::DW_TAG_typedef && Tag != dwarf::DW_TAG_const_type && Tag != dwarf::DW_TAG_volatile_type && Tag != dwarf::DW_TAG_restrict_type) return Ty->getSizeInBits(); auto *BaseType = DD->resolve(Ty->getBaseType()); assert(BaseType && "Unexpected invalid base type"); // If this is a derived type, go ahead and get the base type, unless it's a // reference then it's just the size of the field. Pointer types have no need // of this since they're a different type of qualification on the type. if (BaseType->getTag() == dwarf::DW_TAG_reference_type || BaseType->getTag() == dwarf::DW_TAG_rvalue_reference_type) return Ty->getSizeInBits(); if (auto *DT = dyn_cast<DIDerivedType>(BaseType)) return getBaseTypeSize(DD, DT); return BaseType->getSizeInBits(); } void DwarfUnit::addConstantFPValue(DIE &Die, const MachineOperand &MO) { assert(MO.isFPImm() && "Invalid machine operand!"); DIEBlock *Block = new (DIEValueAllocator) DIEBlock; APFloat FPImm = MO.getFPImm()->getValueAPF(); // Get the raw data form of the floating point. const APInt FltVal = FPImm.bitcastToAPInt(); const char *FltPtr = (const char *)FltVal.getRawData(); int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte. bool LittleEndian = Asm->getDataLayout().isLittleEndian(); int Incr = (LittleEndian ? 1 : -1); int Start = (LittleEndian ? 0 : NumBytes - 1); int Stop = (LittleEndian ? NumBytes : -1); // Output the constant to DWARF one byte at a time. for (; Start != Stop; Start += Incr) addUInt(*Block, dwarf::DW_FORM_data1, (unsigned char)0xFF & FltPtr[Start]); addBlock(Die, dwarf::DW_AT_const_value, Block); } void DwarfUnit::addConstantFPValue(DIE &Die, const ConstantFP *CFP) { // Pass this down to addConstantValue as an unsigned bag of bits. 
addConstantValue(Die, CFP->getValueAPF().bitcastToAPInt(), true); } void DwarfUnit::addConstantValue(DIE &Die, const ConstantInt *CI, const DIType *Ty) { addConstantValue(Die, CI->getValue(), Ty); } void DwarfUnit::addConstantValue(DIE &Die, const MachineOperand &MO, const DIType *Ty) { assert(MO.isImm() && "Invalid machine operand!"); addConstantValue(Die, isUnsignedDIType(DD, Ty), MO.getImm()); } void DwarfUnit::addConstantValue(DIE &Die, bool Unsigned, uint64_t Val) { // FIXME: This is a bit conservative/simple - it emits negative values always // sign extended to 64 bits rather than minimizing the number of bytes. addUInt(Die, dwarf::DW_AT_const_value, Unsigned ? dwarf::DW_FORM_udata : dwarf::DW_FORM_sdata, Val); } void DwarfUnit::addConstantValue(DIE &Die, const APInt &Val, const DIType *Ty) { addConstantValue(Die, Val, isUnsignedDIType(DD, Ty)); } void DwarfUnit::addConstantValue(DIE &Die, const APInt &Val, bool Unsigned) { unsigned CIBitWidth = Val.getBitWidth(); if (CIBitWidth <= 64) { addConstantValue(Die, Unsigned, Unsigned ? Val.getZExtValue() : Val.getSExtValue()); return; } DIEBlock *Block = new (DIEValueAllocator) DIEBlock; // Get the raw data form of the large APInt. const uint64_t *Ptr64 = Val.getRawData(); int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte. bool LittleEndian = Asm->getDataLayout().isLittleEndian(); // Output the constant to DWARF one byte at a time. for (int i = 0; i < NumBytes; i++) { uint8_t c; if (LittleEndian) c = Ptr64[i / 8] >> (8 * (i & 7)); else c = Ptr64[(NumBytes - 1 - i) / 8] >> (8 * ((NumBytes - 1 - i) & 7)); addUInt(*Block, dwarf::DW_FORM_data1, c); } addBlock(Die, dwarf::DW_AT_const_value, Block); } void DwarfUnit::addLinkageName(DIE &Die, StringRef LinkageName) { if (!LinkageName.empty()) addString(Die, DD->getDwarfVersion() >= 4 ? dwarf::DW_AT_linkage_name : dwarf::DW_AT_MIPS_linkage_name, GlobalValue::getRealLinkageName(LinkageName)); } void DwarfUnit::addTemplateParams(DIE &Buffer, DINodeArray TParams) { // Add template parameters. for (const auto *Element : TParams) { if (auto *TTP = dyn_cast<DITemplateTypeParameter>(Element)) constructTemplateTypeParameterDIE(Buffer, TTP); else if (auto *TVP = dyn_cast<DITemplateValueParameter>(Element)) constructTemplateValueParameterDIE(Buffer, TVP); } } DIE *DwarfUnit::getOrCreateContextDIE(const DIScope *Context) { if (!Context || isa<DIFile>(Context)) return &getUnitDie(); if (auto *T = dyn_cast<DIType>(Context)) return getOrCreateTypeDIE(T); if (auto *NS = dyn_cast<DINamespace>(Context)) return getOrCreateNameSpace(NS); if (auto *SP = dyn_cast<DISubprogram>(Context)) return getOrCreateSubprogramDIE(SP); return getDIE(Context); } DIE *DwarfUnit::createTypeDIE(const DICompositeType *Ty) { auto *Context = resolve(Ty->getScope()); DIE *ContextDIE = getOrCreateContextDIE(Context); if (DIE *TyDIE = getDIE(Ty)) return TyDIE; // Create new type. 
DIE &TyDIE = createAndAddDIE(Ty->getTag(), *ContextDIE, Ty); constructTypeDIE(TyDIE, cast<DICompositeType>(Ty)); updateAcceleratorTables(Context, Ty, TyDIE); return &TyDIE; } DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) { if (!TyNode) return nullptr; auto *Ty = cast<DIType>(TyNode); assert(Ty == resolve(Ty->getRef()) && "type was not uniqued, possible ODR violation."); // DW_TAG_restrict_type is not supported in DWARF2 if (Ty->getTag() == dwarf::DW_TAG_restrict_type && DD->getDwarfVersion() <= 2) return getOrCreateTypeDIE(resolve(cast<DIDerivedType>(Ty)->getBaseType())); // Construct the context before querying for the existence of the DIE in case // such construction creates the DIE. auto *Context = resolve(Ty->getScope()); DIE *ContextDIE = getOrCreateContextDIE(Context); assert(ContextDIE); if (DIE *TyDIE = getDIE(Ty)) return TyDIE; // Create new type. DIE &TyDIE = createAndAddDIE(Ty->getTag(), *ContextDIE, Ty); updateAcceleratorTables(Context, Ty, TyDIE); if (auto *BT = dyn_cast<DIBasicType>(Ty)) constructTypeDIE(TyDIE, BT); else if (auto *STy = dyn_cast<DISubroutineType>(Ty)) constructTypeDIE(TyDIE, STy); else if (auto *CTy = dyn_cast<DICompositeType>(Ty)) { if (GenerateDwarfTypeUnits && !Ty->isForwardDecl()) if (MDString *TypeId = CTy->getRawIdentifier()) { DD->addDwarfTypeUnitType(getCU(), TypeId->getString(), TyDIE, CTy); // Skip updating the accelerator tables since this is not the full type. return &TyDIE; } constructTypeDIE(TyDIE, CTy); } else { constructTypeDIE(TyDIE, cast<DIDerivedType>(Ty)); } return &TyDIE; } void DwarfUnit::updateAcceleratorTables(const DIScope *Context, const DIType *Ty, const DIE &TyDIE) { if (!Ty->getName().empty() && !Ty->isForwardDecl()) { bool IsImplementation = 0; if (auto *CT = dyn_cast<DICompositeTypeBase>(Ty)) { // A runtime language of 0 actually means C/C++ and that any // non-negative value is some version of Objective-C/C++. IsImplementation = CT->getRuntimeLang() == 0 || CT->isObjcClassComplete(); } unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0; DD->addAccelType(Ty->getName(), TyDIE, Flags); if (!Context || isa<DICompileUnit>(Context) || isa<DIFile>(Context) || isa<DINamespace>(Context)) addGlobalType(Ty, TyDIE, Context); } } void DwarfUnit::addType(DIE &Entity, const DIType *Ty, dwarf::Attribute Attribute) { assert(Ty && "Trying to add a type that doesn't exist?"); addDIEEntry(Entity, Attribute, DIEEntry(*getOrCreateTypeDIE(Ty))); } std::string DwarfUnit::getParentContextString(const DIScope *Context) const { if (!Context) return ""; // FIXME: Decide whether to implement this for non-C++ languages. if (getLanguage() != dwarf::DW_LANG_C_plus_plus) return ""; std::string CS; SmallVector<const DIScope *, 1> Parents; while (!isa<DICompileUnit>(Context)) { Parents.push_back(Context); if (Context->getScope()) Context = resolve(Context->getScope()); else // Structure, etc types will have a NULL context if they're at the top // level. break; } // Reverse iterate over our list to go from the outermost construct to the // innermost. for (auto I = Parents.rbegin(), E = Parents.rend(); I != E; ++I) { const DIScope *Ctx = *I; StringRef Name = Ctx->getName(); if (Name.empty() && isa<DINamespace>(Ctx)) Name = "(anonymous namespace)"; if (!Name.empty()) { CS += Name; CS += "::"; } } return CS; } void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIBasicType *BTy) { // Get core information. StringRef Name = BTy->getName(); // Add name if not anonymous or intermediate type. 
if (!Name.empty()) addString(Buffer, dwarf::DW_AT_name, Name); // An unspecified type only has a name attribute. if (BTy->getTag() == dwarf::DW_TAG_unspecified_type) return; addUInt(Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, BTy->getEncoding()); uint64_t Size = BTy->getSizeInBits() >> 3; addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); } void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIDerivedType *DTy) { // Get core information. StringRef Name = DTy->getName(); uint64_t Size = DTy->getSizeInBits() >> 3; uint16_t Tag = Buffer.getTag(); // Map to main type, void will not have a type. const DIType *FromTy = resolve(DTy->getBaseType()); if (FromTy) addType(Buffer, FromTy); // Add name if not anonymous or intermediate type. if (!Name.empty()) addString(Buffer, dwarf::DW_AT_name, Name); // Add size if non-zero (derived types might be zero-sized.) if (Size && Tag != dwarf::DW_TAG_pointer_type && Tag != dwarf::DW_TAG_ptr_to_member_type) addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); if (Tag == dwarf::DW_TAG_ptr_to_member_type) addDIEEntry( Buffer, dwarf::DW_AT_containing_type, *getOrCreateTypeDIE(resolve(cast<DIDerivedType>(DTy)->getClassType()))); // Add source line info if available and TyDesc is not a forward declaration. if (!DTy->isForwardDecl()) addSourceLine(Buffer, DTy); } void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args) { for (unsigned i = 1, N = Args.size(); i < N; ++i) { const DIType *Ty = resolve(Args[i]); if (!Ty) { assert(i == N-1 && "Unspecified parameter must be the last argument"); createAndAddDIE(dwarf::DW_TAG_unspecified_parameters, Buffer); } else { DIE &Arg = createAndAddDIE(dwarf::DW_TAG_formal_parameter, Buffer); addType(Arg, Ty); if (Ty->isArtificial()) addFlag(Arg, dwarf::DW_AT_artificial); } } } void DwarfUnit::constructTypeDIE(DIE &Buffer, const DISubroutineType *CTy) { // Add return type. A void return won't have a type. auto Elements = cast<DISubroutineType>(CTy)->getTypeArray(); if (Elements.size()) if (auto RTy = resolve(Elements[0])) addType(Buffer, RTy); bool isPrototyped = true; if (Elements.size() == 2 && !Elements[1]) isPrototyped = false; constructSubprogramArguments(Buffer, Elements); // Add prototype flag if we're dealing with a C language and the function has // been prototyped. uint16_t Language = getLanguage(); if (isPrototyped && (Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 || Language == dwarf::DW_LANG_ObjC)) addFlag(Buffer, dwarf::DW_AT_prototyped); if (CTy->isLValueReference()) addFlag(Buffer, dwarf::DW_AT_reference); if (CTy->isRValueReference()) addFlag(Buffer, dwarf::DW_AT_rvalue_reference); } void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) { // Add name if not anonymous or intermediate type. StringRef Name = CTy->getName(); uint64_t Size = CTy->getSizeInBits() >> 3; uint16_t Tag = Buffer.getTag(); switch (Tag) { case dwarf::DW_TAG_array_type: constructArrayTypeDIE(Buffer, CTy); break; case dwarf::DW_TAG_enumeration_type: constructEnumTypeDIE(Buffer, CTy); break; case dwarf::DW_TAG_structure_type: case dwarf::DW_TAG_union_type: case dwarf::DW_TAG_class_type: { // Add elements to structure type. 
DINodeArray Elements = CTy->getElements(); for (const auto *Element : Elements) { if (!Element) continue; if (auto *SP = dyn_cast<DISubprogram>(Element)) getOrCreateSubprogramDIE(SP); else if (auto *DDTy = dyn_cast<DIDerivedType>(Element)) { if (DDTy->getTag() == dwarf::DW_TAG_friend) { DIE &ElemDie = createAndAddDIE(dwarf::DW_TAG_friend, Buffer); addType(ElemDie, resolve(DDTy->getBaseType()), dwarf::DW_AT_friend); } else if (DDTy->isStaticMember()) { getOrCreateStaticMemberDIE(DDTy); } else { constructMemberDIE(Buffer, DDTy); } } else if (auto *Property = dyn_cast<DIObjCProperty>(Element)) { DIE &ElemDie = createAndAddDIE(Property->getTag(), Buffer); StringRef PropertyName = Property->getName(); addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName); if (Property->getType()) addType(ElemDie, resolve(Property->getType())); addSourceLine(ElemDie, Property); StringRef GetterName = Property->getGetterName(); if (!GetterName.empty()) addString(ElemDie, dwarf::DW_AT_APPLE_property_getter, GetterName); StringRef SetterName = Property->getSetterName(); if (!SetterName.empty()) addString(ElemDie, dwarf::DW_AT_APPLE_property_setter, SetterName); if (unsigned PropertyAttributes = Property->getAttributes()) addUInt(ElemDie, dwarf::DW_AT_APPLE_property_attribute, None, PropertyAttributes); } } if (CTy->isAppleBlockExtension()) addFlag(Buffer, dwarf::DW_AT_APPLE_block); // This is outside the DWARF spec, but GDB expects a DW_AT_containing_type // inside C++ composite types to point to the base class with the vtable. if (auto *ContainingType = dyn_cast_or_null<DICompositeType>(resolve(CTy->getVTableHolder()))) addDIEEntry(Buffer, dwarf::DW_AT_containing_type, *getOrCreateTypeDIE(ContainingType)); if (CTy->isObjcClassComplete()) addFlag(Buffer, dwarf::DW_AT_APPLE_objc_complete_type); // Add template parameters to a class, structure or union types. // FIXME: The support isn't in the metadata for this yet. if (Tag == dwarf::DW_TAG_class_type || Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type) addTemplateParams(Buffer, CTy->getTemplateParams()); break; } default: break; } // Add name if not anonymous or intermediate type. if (!Name.empty()) addString(Buffer, dwarf::DW_AT_name, Name); if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type || Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type) { // Add size if non-zero (derived types might be zero-sized.) // TODO: Do we care about size for enum forward declarations? if (Size) addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size); else if (!CTy->isForwardDecl()) // Add zero size if it is not a forward declaration. addUInt(Buffer, dwarf::DW_AT_byte_size, None, 0); // If we're a forward decl, say so. if (CTy->isForwardDecl()) addFlag(Buffer, dwarf::DW_AT_declaration); // Add source line info if available. if (!CTy->isForwardDecl()) addSourceLine(Buffer, CTy); // No harm in adding the runtime language to the declaration. unsigned RLang = CTy->getRuntimeLang(); if (RLang) addUInt(Buffer, dwarf::DW_AT_APPLE_runtime_class, dwarf::DW_FORM_data1, RLang); } } void DwarfUnit::constructTemplateTypeParameterDIE( DIE &Buffer, const DITemplateTypeParameter *TP) { DIE &ParamDIE = createAndAddDIE(dwarf::DW_TAG_template_type_parameter, Buffer); // Add the type if it exists, it could be void and therefore no type. 
if (TP->getType()) addType(ParamDIE, resolve(TP->getType())); if (!TP->getName().empty()) addString(ParamDIE, dwarf::DW_AT_name, TP->getName()); } void DwarfUnit::constructTemplateValueParameterDIE( DIE &Buffer, const DITemplateValueParameter *VP) { DIE &ParamDIE = createAndAddDIE(VP->getTag(), Buffer); // Add the type if there is one, template template and template parameter // packs will not have a type. if (VP->getTag() == dwarf::DW_TAG_template_value_parameter) addType(ParamDIE, resolve(VP->getType())); if (!VP->getName().empty()) addString(ParamDIE, dwarf::DW_AT_name, VP->getName()); if (Metadata *Val = VP->getValue()) { if (ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(Val)) addConstantValue(ParamDIE, CI, resolve(VP->getType())); else if (GlobalValue *GV = mdconst::dyn_extract<GlobalValue>(Val)) { // For declaration non-type template parameters (such as global values and // functions) DIELoc *Loc = new (DIEValueAllocator) DIELoc; addOpAddress(*Loc, Asm->getSymbol(GV)); // Emit DW_OP_stack_value to use the address as the immediate value of the // parameter, rather than a pointer to it. addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_stack_value); addBlock(ParamDIE, dwarf::DW_AT_location, Loc); } else if (VP->getTag() == dwarf::DW_TAG_GNU_template_template_param) { assert(isa<MDString>(Val)); addString(ParamDIE, dwarf::DW_AT_GNU_template_name, cast<MDString>(Val)->getString()); } else if (VP->getTag() == dwarf::DW_TAG_GNU_template_parameter_pack) { addTemplateParams(ParamDIE, cast<MDTuple>(Val)); } } } DIE *DwarfUnit::getOrCreateNameSpace(const DINamespace *NS) { // Construct the context before querying for the existence of the DIE in case // such construction creates the DIE. DIE *ContextDIE = getOrCreateContextDIE(NS->getScope()); if (DIE *NDie = getDIE(NS)) return NDie; DIE &NDie = createAndAddDIE(dwarf::DW_TAG_namespace, *ContextDIE, NS); StringRef Name = NS->getName(); if (!Name.empty()) addString(NDie, dwarf::DW_AT_name, NS->getName()); else Name = "(anonymous namespace)"; DD->addAccelNamespace(Name, NDie); addGlobalName(Name, NDie, NS->getScope()); addSourceLine(NDie, NS); return &NDie; } DIE *DwarfUnit::getOrCreateModule(const DIModule *M) { // Construct the context before querying for the existence of the DIE in case // such construction creates the DIE. DIE *ContextDIE = getOrCreateContextDIE(M->getScope()); if (DIE *MDie = getDIE(M)) return MDie; DIE &MDie = createAndAddDIE(dwarf::DW_TAG_module, *ContextDIE, M); if (!M->getName().empty()) { addString(MDie, dwarf::DW_AT_name, M->getName()); addGlobalName(M->getName(), MDie, M->getScope()); } if (!M->getConfigurationMacros().empty()) addString(MDie, dwarf::DW_AT_LLVM_config_macros, M->getConfigurationMacros()); if (!M->getIncludePath().empty()) addString(MDie, dwarf::DW_AT_LLVM_include_path, M->getIncludePath()); if (!M->getISysRoot().empty()) addString(MDie, dwarf::DW_AT_LLVM_isysroot, M->getISysRoot()); return &MDie; } DIE *DwarfUnit::getOrCreateSubprogramDIE(const DISubprogram *SP, bool Minimal) { // Construct the context before querying for the existence of the DIE in case // such construction creates the DIE (as is the case for member function // declarations). DIE *ContextDIE = Minimal ? &getUnitDie() : getOrCreateContextDIE(resolve(SP->getScope())); if (DIE *SPDie = getDIE(SP)) return SPDie; if (auto *SPDecl = SP->getDeclaration()) { if (!Minimal) { // Add subprogram definitions to the CU die directly. ContextDIE = &getUnitDie(); // Build the decl now to ensure it precedes the definition. 
getOrCreateSubprogramDIE(SPDecl); } } // DW_TAG_inlined_subroutine may refer to this DIE. DIE &SPDie = createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, SP); // Stop here and fill this in later, depending on whether or not this // subprogram turns out to have inlined instances or not. if (SP->isDefinition()) return &SPDie; applySubprogramAttributes(SP, SPDie); return &SPDie; } bool DwarfUnit::applySubprogramDefinitionAttributes(const DISubprogram *SP, DIE &SPDie) { DIE *DeclDie = nullptr; StringRef DeclLinkageName; if (auto *SPDecl = SP->getDeclaration()) { DeclDie = getDIE(SPDecl); assert(DeclDie && "This DIE should've already been constructed when the " "definition DIE was created in " "getOrCreateSubprogramDIE"); DeclLinkageName = SPDecl->getLinkageName(); } // Add function template parameters. addTemplateParams(SPDie, SP->getTemplateParams()); // Add the linkage name if we have one and it isn't in the Decl. StringRef LinkageName = SP->getLinkageName(); assert(((LinkageName.empty() || DeclLinkageName.empty()) || LinkageName == DeclLinkageName) && "decl has a linkage name and it is different"); if (DeclLinkageName.empty()) addLinkageName(SPDie, LinkageName); if (!DeclDie) return false; // Refer to the function declaration where all the other attributes will be // found. addDIEEntry(SPDie, dwarf::DW_AT_specification, *DeclDie); return true; } void DwarfUnit::applySubprogramAttributes(const DISubprogram *SP, DIE &SPDie, bool Minimal) { if (!Minimal) if (applySubprogramDefinitionAttributes(SP, SPDie)) return; // Constructors and operators for anonymous aggregates do not have names. if (!SP->getName().empty()) addString(SPDie, dwarf::DW_AT_name, SP->getName()); // Skip the rest of the attributes under -gmlt to save space. if (Minimal) return; addSourceLine(SPDie, SP); // Add the prototype if we have a prototype and we have a C like // language. uint16_t Language = getLanguage(); if (SP->isPrototyped() && (Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 || Language == dwarf::DW_LANG_ObjC)) addFlag(SPDie, dwarf::DW_AT_prototyped); const DISubroutineType *SPTy = SP->getType(); assert(SPTy->getTag() == dwarf::DW_TAG_subroutine_type && "the type of a subprogram should be a subroutine"); auto Args = SPTy->getTypeArray(); // Add a return type. If this is a type like a C/C++ void type we don't add a // return type. if (Args.size()) if (auto Ty = resolve(Args[0])) addType(SPDie, Ty); unsigned VK = SP->getVirtuality(); if (VK) { addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1, VK); DIELoc *Block = getDIELoc(); addUInt(*Block, dwarf::DW_FORM_data1, dwarf::DW_OP_constu); addUInt(*Block, dwarf::DW_FORM_udata, SP->getVirtualIndex()); addBlock(SPDie, dwarf::DW_AT_vtable_elem_location, Block); ContainingTypeMap.insert( std::make_pair(&SPDie, resolve(SP->getContainingType()))); } if (!SP->isDefinition()) { addFlag(SPDie, dwarf::DW_AT_declaration); // Add arguments. Do not add arguments for subprogram definition. They will // be handled while processing variables. 
constructSubprogramArguments(SPDie, Args); } if (SP->isArtificial()) addFlag(SPDie, dwarf::DW_AT_artificial); if (!SP->isLocalToUnit()) addFlag(SPDie, dwarf::DW_AT_external); if (SP->isOptimized()) addFlag(SPDie, dwarf::DW_AT_APPLE_optimized); if (unsigned isa = Asm->getISAEncoding()) addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa); if (SP->isLValueReference()) addFlag(SPDie, dwarf::DW_AT_reference); if (SP->isRValueReference()) addFlag(SPDie, dwarf::DW_AT_rvalue_reference); if (SP->isProtected()) addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_protected); else if (SP->isPrivate()) addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_private); else if (SP->isPublic()) addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_public); if (SP->isExplicit()) addFlag(SPDie, dwarf::DW_AT_explicit); } void DwarfUnit::constructSubrangeDIE(DIE &Buffer, const DISubrange *SR, DIE *IndexTy) { DIE &DW_Subrange = createAndAddDIE(dwarf::DW_TAG_subrange_type, Buffer); addDIEEntry(DW_Subrange, dwarf::DW_AT_type, *IndexTy); // The LowerBound value defines the lower bounds which is typically zero for // C/C++. The Count value is the number of elements. Values are 64 bit. If // Count == -1 then the array is unbounded and we do not emit // DW_AT_lower_bound and DW_AT_count attributes. int64_t LowerBound = SR->getLowerBound(); int64_t DefaultLowerBound = getDefaultLowerBound(); int64_t Count = SR->getCount(); if (DefaultLowerBound == -1 || LowerBound != DefaultLowerBound) addUInt(DW_Subrange, dwarf::DW_AT_lower_bound, None, LowerBound); if (Count != -1) // FIXME: An unbounded array should reference the expression that defines // the array. addUInt(DW_Subrange, dwarf::DW_AT_count, None, Count); } DIE *DwarfUnit::getIndexTyDie() { if (IndexTyDie) return IndexTyDie; // Construct an integer type to use for indexes. IndexTyDie = &createAndAddDIE(dwarf::DW_TAG_base_type, UnitDie); addString(*IndexTyDie, dwarf::DW_AT_name, "sizetype"); addUInt(*IndexTyDie, dwarf::DW_AT_byte_size, None, sizeof(int64_t)); addUInt(*IndexTyDie, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, dwarf::DW_ATE_unsigned); return IndexTyDie; } void DwarfUnit::constructArrayTypeDIE(DIE &Buffer, const DICompositeType *CTy) { if (CTy->isVector()) addFlag(Buffer, dwarf::DW_AT_GNU_vector); // Emit the element type. addType(Buffer, resolve(CTy->getBaseType())); // Get an anonymous type for index type. // FIXME: This type should be passed down from the front end // as different languages may have different sizes for indexes. DIE *IdxTy = getIndexTyDie(); // Add subranges to array type. DINodeArray Elements = CTy->getElements(); for (unsigned i = 0, N = Elements.size(); i < N; ++i) { // FIXME: Should this really be such a loose cast? if (auto *Element = dyn_cast_or_null<DINode>(Elements[i])) if (Element->getTag() == dwarf::DW_TAG_subrange_type) constructSubrangeDIE(Buffer, cast<DISubrange>(Element), IdxTy); } } void DwarfUnit::constructEnumTypeDIE(DIE &Buffer, const DICompositeType *CTy) { DINodeArray Elements = CTy->getElements(); // Add enumerators to enumeration type. 
for (unsigned i = 0, N = Elements.size(); i < N; ++i) { auto *Enum = dyn_cast_or_null<DIEnumerator>(Elements[i]); if (Enum) { DIE &Enumerator = createAndAddDIE(dwarf::DW_TAG_enumerator, Buffer); StringRef Name = Enum->getName(); addString(Enumerator, dwarf::DW_AT_name, Name); int64_t Value = Enum->getValue(); addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata, Value); } } const DIType *DTy = resolve(CTy->getBaseType()); if (DTy) { addType(Buffer, DTy); addFlag(Buffer, dwarf::DW_AT_enum_class); } } void DwarfUnit::constructContainingTypeDIEs() { for (auto CI = ContainingTypeMap.begin(), CE = ContainingTypeMap.end(); CI != CE; ++CI) { DIE &SPDie = *CI->first; const DINode *D = CI->second; if (!D) continue; DIE *NDie = getDIE(D); if (!NDie) continue; addDIEEntry(SPDie, dwarf::DW_AT_containing_type, *NDie); } } void DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { DIE &MemberDie = createAndAddDIE(DT->getTag(), Buffer); StringRef Name = DT->getName(); if (!Name.empty()) addString(MemberDie, dwarf::DW_AT_name, Name); addType(MemberDie, resolve(DT->getBaseType())); addSourceLine(MemberDie, DT); if (DT->getTag() == dwarf::DW_TAG_inheritance && DT->isVirtual()) { // For C++, virtual base classes are not at fixed offset. Use following // expression to extract appropriate offset from vtable. // BaseAddr = ObAddr + *((*ObAddr) - Offset) DIELoc *VBaseLocationDie = new (DIEValueAllocator) DIELoc; addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_dup); addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref); addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_constu); addUInt(*VBaseLocationDie, dwarf::DW_FORM_udata, DT->getOffsetInBits()); addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_minus); addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref); addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus); addBlock(MemberDie, dwarf::DW_AT_data_member_location, VBaseLocationDie); } else { uint64_t Size = DT->getSizeInBits(); uint64_t FieldSize = getBaseTypeSize(DD, DT); uint64_t OffsetInBytes; if (FieldSize && Size != FieldSize) { // Handle bitfield, assume bytes are 8 bits. addUInt(MemberDie, dwarf::DW_AT_byte_size, None, FieldSize/8); addUInt(MemberDie, dwarf::DW_AT_bit_size, None, Size); // // The DWARF 2 DW_AT_bit_offset is counting the bits between the most // significant bit of the aligned storage unit containing the bit field to // the most significan bit of the bit field. // // FIXME: DWARF 4 states that DW_AT_data_bit_offset (which // counts from the beginning, regardless of endianness) should // be used instead. // // // Struct Align Align Align // v v v v // +-----------+-----*-----+-----*-----+-- // | ... |b1|b2|b3|b4| // +-----------+-----*-----+-----*-----+-- // | | |<-- Size ->| | // |<---- Offset --->| |<--->| // | | | \_ DW_AT_bit_offset (little endian) // | |<--->| // |<--------->| \_ StartBitOffset = DW_AT_bit_offset (big endian) // \ = DW_AT_data_bit_offset (biendian) // \_ OffsetInBytes uint64_t Offset = DT->getOffsetInBits(); uint64_t Align = DT->getAlignInBits() ? DT->getAlignInBits() : FieldSize; uint64_t AlignMask = ~(Align - 1); // The bits from the start of the storage unit to the start of the field. uint64_t StartBitOffset = Offset - (Offset & AlignMask); // The endian-dependent DWARF 2 offset. uint64_t DwarfBitOffset = Asm->getDataLayout().isLittleEndian() ? 
OffsetToAlignment(Offset + Size, Align) : StartBitOffset; // The byte offset of the field's aligned storage unit inside the struct. OffsetInBytes = (Offset - StartBitOffset) / 8; addUInt(MemberDie, dwarf::DW_AT_bit_offset, None, DwarfBitOffset); } else // This is not a bitfield. OffsetInBytes = DT->getOffsetInBits() / 8; if (DD->getDwarfVersion() <= 2) { DIELoc *MemLocationDie = new (DIEValueAllocator) DIELoc; addUInt(*MemLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst); addUInt(*MemLocationDie, dwarf::DW_FORM_udata, OffsetInBytes); addBlock(MemberDie, dwarf::DW_AT_data_member_location, MemLocationDie); } else addUInt(MemberDie, dwarf::DW_AT_data_member_location, None, OffsetInBytes); } if (DT->isProtected()) addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_protected); else if (DT->isPrivate()) addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_private); // Otherwise C++ member and base classes are considered public. else if (DT->isPublic()) addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_public); if (DT->isVirtual()) addUInt(MemberDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1, dwarf::DW_VIRTUALITY_virtual); // Objective-C properties. if (DINode *PNode = DT->getObjCProperty()) if (DIE *PDie = getDIE(PNode)) MemberDie.addValue(DIEValueAllocator, dwarf::DW_AT_APPLE_property, dwarf::DW_FORM_ref4, DIEEntry(*PDie)); if (DT->isArtificial()) addFlag(MemberDie, dwarf::DW_AT_artificial); } DIE *DwarfUnit::getOrCreateStaticMemberDIE(const DIDerivedType *DT) { if (!DT) return nullptr; // Construct the context before querying for the existence of the DIE in case // such construction creates the DIE. DIE *ContextDIE = getOrCreateContextDIE(resolve(DT->getScope())); assert(dwarf::isType(ContextDIE->getTag()) && "Static member should belong to a type."); if (DIE *StaticMemberDIE = getDIE(DT)) return StaticMemberDIE; DIE &StaticMemberDIE = createAndAddDIE(DT->getTag(), *ContextDIE, DT); const DIType *Ty = resolve(DT->getBaseType()); addString(StaticMemberDIE, dwarf::DW_AT_name, DT->getName()); addType(StaticMemberDIE, Ty); addSourceLine(StaticMemberDIE, DT); addFlag(StaticMemberDIE, dwarf::DW_AT_external); addFlag(StaticMemberDIE, dwarf::DW_AT_declaration); // FIXME: We could omit private if the parent is a class_type, and // public if the parent is something else. if (DT->isProtected()) addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_protected); else if (DT->isPrivate()) addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_private); else if (DT->isPublic()) addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_public); if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DT->getConstant())) addConstantValue(StaticMemberDIE, CI, Ty); if (const ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(DT->getConstant())) addConstantFPValue(StaticMemberDIE, CFP); return &StaticMemberDIE; } void DwarfUnit::emitHeader(bool UseOffsets) { // Emit size of content not including length itself Asm->OutStreamer->AddComment("Length of Unit"); Asm->EmitInt32(getHeaderSize() + UnitDie.getSize()); Asm->OutStreamer->AddComment("DWARF version number"); Asm->EmitInt16(DD->getDwarfVersion()); Asm->OutStreamer->AddComment("Offset Into Abbrev. Section"); // We share one abbreviations table across all units so it's always at the // start of the section. 
Use a relocatable offset where needed to ensure // linking doesn't invalidate that offset. const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); Asm->emitDwarfSymbolReference(TLOF.getDwarfAbbrevSection()->getBeginSymbol(), UseOffsets); Asm->OutStreamer->AddComment("Address Size (in bytes)"); Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); } void DwarfUnit::initSection(MCSection *Section) { assert(!this->Section); this->Section = Section; } void DwarfTypeUnit::emitHeader(bool UseOffsets) { DwarfUnit::emitHeader(UseOffsets); Asm->OutStreamer->AddComment("Type Signature"); Asm->OutStreamer->EmitIntValue(TypeSignature, sizeof(TypeSignature)); Asm->OutStreamer->AddComment("Type DIE Offset"); // In a skeleton type unit there is no type DIE so emit a zero offset. Asm->OutStreamer->EmitIntValue(Ty ? Ty->getOffset() : 0, sizeof(Ty->getOffset())); } bool DwarfTypeUnit::isDwoUnit() const { // Since there are no skeleton type units, all type units are dwo type units // when split DWARF is being used. return DD->useSplitDwarf(); }
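// A worked example for the DWARF 2 bitfield offset computed in
// constructMemberDIE above (illustrative values only; the struct below is
// hypothetical and not part of this file). For
//   struct S { unsigned a : 3; unsigned b : 5; };   // 32-bit storage unit
// member 'b' has Offset = 3 bits, Size = 5 bits, FieldSize = Align = 32:
//   StartBitOffset = Offset - (Offset & ~(Align - 1)) = 3 - 0 = 3
//   OffsetInBytes  = (Offset - StartBitOffset) / 8    = 0
//   DW_AT_bit_offset (little endian)
//                  = OffsetToAlignment(Offset + Size, Align) = 32 - 8 = 24
// i.e. on a little-endian target there are 24 bits between the most
// significant bit of the 32-bit storage unit and the most significant bit of
// 'b', which is what DWARF 2 expects; on a big-endian target the attribute
// would simply be StartBitOffset = 3.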
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
//===-- llvm/CodeGen/DwarfCompileUnit.h - Dwarf Compile Unit ---*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf compile unit. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H #include "DwarfUnit.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/DebugInfo.h" #include "llvm/Support/Dwarf.h" namespace llvm { class AsmPrinter; class DIE; class DwarfDebug; class DwarfFile; class MCSymbol; class LexicalScope; class DwarfCompileUnit : public DwarfUnit { /// The attribute index of DW_AT_stmt_list in the compile unit DIE, avoiding /// the need to search for it in applyStmtList. DIE::value_iterator StmtListValue; /// Skeleton unit associated with this unit. DwarfCompileUnit *Skeleton; /// The start of the unit within its section. MCSymbol *LabelBegin; /// GlobalNames - A map of globally visible named entities for this unit. StringMap<const DIE *> GlobalNames; /// GlobalTypes - A map of globally visible types for this unit. StringMap<const DIE *> GlobalTypes; // List of range lists for a given compile unit, separate from the ranges for // the CU itself. SmallVector<RangeSpanList, 1> CURangeLists; // List of ranges for a given compile unit. SmallVector<RangeSpan, 2> CURanges; // The base address of this unit, if any. Used for relative references in // ranges/locs. const MCSymbol *BaseAddress; /// \brief Construct a DIE for the given DbgVariable without initializing the /// DbgVariable's DIE reference. DIE *constructVariableDIEImpl(const DbgVariable &DV, bool Abstract); bool isDwoUnit() const override; bool includeMinimalInlineScopes() const; public: DwarfCompileUnit(unsigned UID, const DICompileUnit *Node, AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU); DwarfCompileUnit *getSkeleton() const { return Skeleton; } void initStmtList(); /// Apply the DW_AT_stmt_list from this compile unit to the specified DIE. void applyStmtList(DIE &D); /// getOrCreateGlobalVariableDIE - get or create global variable DIE. DIE *getOrCreateGlobalVariableDIE(const DIGlobalVariable *GV); /// addLabelAddress - Add a dwarf label attribute data and value using /// either DW_FORM_addr or DW_FORM_GNU_addr_index. void addLabelAddress(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Label); /// addLocalLabelAddress - Add a dwarf label attribute data and value using /// DW_FORM_addr only. void addLocalLabelAddress(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Label); /// addSectionDelta - Add a label delta attribute data and value. DIE::value_iterator addSectionDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi, const MCSymbol *Lo); DwarfCompileUnit &getCU() override { return *this; } unsigned getOrCreateSourceID(StringRef FileName, StringRef DirName) override; /// addRange - Add an address range to the list of ranges for this unit. void addRange(RangeSpan Range); void attachLowHighPC(DIE &D, const MCSymbol *Begin, const MCSymbol *End); /// addSectionLabel - Add a Dwarf section label attribute data and value. 
/// DIE::value_iterator addSectionLabel(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Label, const MCSymbol *Sec); /// \brief Find DIE for the given subprogram and attach appropriate /// DW_AT_low_pc and DW_AT_high_pc attributes. If there are global /// variables in this scope then create and insert DIEs for these /// variables. DIE &updateSubprogramScopeDIE(const DISubprogram *SP); void constructScopeDIE(LexicalScope *Scope, SmallVectorImpl<DIE *> &FinalChildren); /// \brief A helper function to construct a RangeSpanList for a given /// lexical scope. void addScopeRangeList(DIE &ScopeDIE, SmallVector<RangeSpan, 2> Range); void attachRangesOrLowHighPC(DIE &D, SmallVector<RangeSpan, 2> Ranges); void attachRangesOrLowHighPC(DIE &D, const SmallVectorImpl<InsnRange> &Ranges); /// \brief This scope represents inlined body of a function. Construct /// DIE to represent this concrete inlined copy of the function. DIE *constructInlinedScopeDIE(LexicalScope *Scope); /// \brief Construct new DW_TAG_lexical_block for this scope and /// attach DW_AT_low_pc/DW_AT_high_pc labels. DIE *constructLexicalScopeDIE(LexicalScope *Scope); /// constructVariableDIE - Construct a DIE for the given DbgVariable. DIE *constructVariableDIE(DbgVariable &DV, bool Abstract = false); DIE *constructVariableDIE(DbgVariable &DV, const LexicalScope &Scope, DIE *&ObjectPointer); /// A helper function to create children of a Scope DIE. DIE *createScopeChildrenDIE(LexicalScope *Scope, SmallVectorImpl<DIE *> &Children, unsigned *ChildScopeCount = nullptr); /// \brief Construct a DIE for this subprogram scope. void constructSubprogramScopeDIE(LexicalScope *Scope); DIE *createAndAddScopeChildren(LexicalScope *Scope, DIE &ScopeDIE); void constructAbstractSubprogramScopeDIE(LexicalScope *Scope); /// \brief Construct import_module DIE. DIE *constructImportedEntityDIE(const DIImportedEntity *Module); void finishSubprogramDefinition(const DISubprogram *SP); void collectDeadVariables(const DISubprogram *SP); /// Set the skeleton unit associated with this unit. void setSkeleton(DwarfCompileUnit &Skel) { Skeleton = &Skel; } const MCSymbol *getSectionSym() const { assert(Section); return Section->getBeginSymbol(); } unsigned getLength() { return sizeof(uint32_t) + // Length field getHeaderSize() + UnitDie.getSize(); } void emitHeader(bool UseOffsets) override; MCSymbol *getLabelBegin() const { assert(Section); return LabelBegin; } /// Add a new global name to the compile unit. void addGlobalName(StringRef Name, DIE &Die, const DIScope *Context) override; /// Add a new global type to the compile unit. void addGlobalType(const DIType *Ty, const DIE &Die, const DIScope *Context) override; const StringMap<const DIE *> &getGlobalNames() const { return GlobalNames; } const StringMap<const DIE *> &getGlobalTypes() const { return GlobalTypes; } /// Add DW_AT_location attribute for a DbgVariable based on provided /// MachineLocation. void addVariableAddress(const DbgVariable &DV, DIE &Die, MachineLocation Location); /// Add an address attribute to a die based on the location provided. void addAddress(DIE &Die, dwarf::Attribute Attribute, const MachineLocation &Location); /// Start with the address based on the location provided, and generate the /// DWARF information necessary to find the actual variable (navigating the /// extra location information encoded in the type) based on the starting /// location. Add the DWARF information to the die. 
void addComplexAddress(const DbgVariable &DV, DIE &Die, dwarf::Attribute Attribute, const MachineLocation &Location); /// Add a Dwarf loclistptr attribute data and value. void addLocationList(DIE &Die, dwarf::Attribute Attribute, unsigned Index); void applyVariableAttributes(const DbgVariable &Var, DIE &VariableDie); /// Add a Dwarf expression attribute data and value. void addExpr(DIELoc &Die, dwarf::Form Form, const MCExpr *Expr); void applySubprogramAttributesToDefinition(const DISubprogram *SP, DIE &SPDie); /// getRangeLists - Get the vector of range lists. const SmallVectorImpl<RangeSpanList> &getRangeLists() const { return (Skeleton ? Skeleton : this)->CURangeLists; } /// getRanges - Get the list of ranges for this unit. const SmallVectorImpl<RangeSpan> &getRanges() const { return CURanges; } SmallVector<RangeSpan, 2> takeRanges() { return std::move(CURanges); } void setBaseAddress(const MCSymbol *Base) { BaseAddress = Base; } const MCSymbol *getBaseAddress() const { return BaseAddress; } }; } // end llvm namespace #endif
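// Note on getLength() above: DwarfUnit::emitHeader (DwarfUnit.cpp) writes the
// "Length of Unit" field as getHeaderSize() + UnitDie.getSize(), i.e. the
// number of bytes that follow the 4-byte length word, whereas getLength()
// also counts the length word itself. As an illustration (numbers are
// hypothetical): a DWARF32 compile unit whose header occupies 7 bytes after
// the length word and whose DIE tree occupies 100 bytes would emit 107 in
// the length field and take getLength() == 4 + 7 + 100 == 111 bytes in
// .debug_info.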
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfDebug.h
//===-- llvm/CodeGen/DwarfDebug.h - Dwarf Debug Framework ------*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf debug info into asm files. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFDEBUG_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFDEBUG_H #include "AsmPrinterHandler.h" #include "DbgValueHistoryCalculator.h" #include "DebugLocStream.h" #include "DwarfAccelTable.h" #include "DwarfFile.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/CodeGen/DIE.h" #include "llvm/CodeGen/LexicalScopes.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugLoc.h" #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MachineLocation.h" #include "llvm/Support/Allocator.h" #include <memory> namespace llvm { class AsmPrinter; class ByteStreamer; class ConstantInt; class ConstantFP; class DebugLocEntry; class DwarfCompileUnit; class DwarfDebug; class DwarfTypeUnit; class DwarfUnit; class MachineModuleInfo; //===----------------------------------------------------------------------===// /// This class is used to record source line correspondence. class SrcLineInfo { unsigned Line; // Source line number. unsigned Column; // Source column. unsigned SourceID; // Source ID number. MCSymbol *Label; // Label in code ID number. public: SrcLineInfo(unsigned L, unsigned C, unsigned S, MCSymbol *label) : Line(L), Column(C), SourceID(S), Label(label) {} // Accessors unsigned getLine() const { return Line; } unsigned getColumn() const { return Column; } unsigned getSourceID() const { return SourceID; } MCSymbol *getLabel() const { return Label; } }; // // /////////////////////////////////////////////////////////////////////////////// /// This class is used to track local variable information. /// /// Variables can be created from allocas, in which case they're generated from /// the MMI table. Such variables can have multiple expressions and frame /// indices. The \a Expr and \a FrameIndices array must match. /// /// Variables can be created from \c DBG_VALUE instructions. Those whose /// location changes over time use \a DebugLocListIndex, while those with a /// single instruction use \a MInsn and (optionally) a single entry of \a Expr. /// /// Variables that have been optimized out use none of these fields. class DbgVariable { const DILocalVariable *Var; /// Variable Descriptor. const DILocation *IA; /// Inlined at location. SmallVector<const DIExpression *, 1> Expr; /// Complex address. DIE *TheDIE = nullptr; /// Variable DIE. unsigned DebugLocListIndex = ~0u; /// Offset in DebugLocs. const MachineInstr *MInsn = nullptr; /// DBG_VALUE instruction. SmallVector<int, 1> FrameIndex; /// Frame index. DwarfDebug *DD; public: /// Construct a DbgVariable. /// /// Creates a variable without any DW_AT_location. Call \a initializeMMI() /// for MMI entries, or \a initializeDbgValue() for DBG_VALUE instructions. DbgVariable(const DILocalVariable *V, const DILocation *IA, DwarfDebug *DD) : Var(V), IA(IA), DD(DD) {} /// Initialize from the MMI table. 
void initializeMMI(const DIExpression *E, int FI) { assert(Expr.empty() && "Already initialized?"); assert(FrameIndex.empty() && "Already initialized?"); assert(!MInsn && "Already initialized?"); assert((!E || E->isValid()) && "Expected valid expression"); assert(~FI && "Expected valid index"); Expr.push_back(E); FrameIndex.push_back(FI); } /// Initialize from a DBG_VALUE instruction. void initializeDbgValue(const MachineInstr *DbgValue) { assert(Expr.empty() && "Already initialized?"); assert(FrameIndex.empty() && "Already initialized?"); assert(!MInsn && "Already initialized?"); assert(Var == DbgValue->getDebugVariable() && "Wrong variable"); assert(IA == DbgValue->getDebugLoc()->getInlinedAt() && "Wrong inlined-at"); MInsn = DbgValue; if (auto *E = DbgValue->getDebugExpression()) if (E->getNumElements()) Expr.push_back(E); } // Accessors. const DILocalVariable *getVariable() const { return Var; } const DILocation *getInlinedAt() const { return IA; } const ArrayRef<const DIExpression *> getExpression() const { return Expr; } void setDIE(DIE &D) { TheDIE = &D; } DIE *getDIE() const { return TheDIE; } void setDebugLocListIndex(unsigned O) { DebugLocListIndex = O; } unsigned getDebugLocListIndex() const { return DebugLocListIndex; } StringRef getName() const { return Var->getName(); } const MachineInstr *getMInsn() const { return MInsn; } const ArrayRef<int> getFrameIndex() const { return FrameIndex; } void addMMIEntry(const DbgVariable &V) { assert(DebugLocListIndex == ~0U && !MInsn && "not an MMI entry"); assert(V.DebugLocListIndex == ~0U && !V.MInsn && "not an MMI entry"); assert(V.Var == Var && "conflicting variable"); assert(V.IA == IA && "conflicting inlined-at location"); assert(!FrameIndex.empty() && "Expected an MMI entry"); assert(!V.FrameIndex.empty() && "Expected an MMI entry"); assert(Expr.size() == FrameIndex.size() && "Mismatched expressions"); assert(V.Expr.size() == V.FrameIndex.size() && "Mismatched expressions"); Expr.append(V.Expr.begin(), V.Expr.end()); FrameIndex.append(V.FrameIndex.begin(), V.FrameIndex.end()); assert(std::all_of(Expr.begin(), Expr.end(), [](const DIExpression *E) { return E && E->isBitPiece(); }) && "conflicting locations for variable"); } // Translate tag to proper Dwarf tag. dwarf::Tag getTag() const { if (Var->getTag() == dwarf::DW_TAG_arg_variable) return dwarf::DW_TAG_formal_parameter; return dwarf::DW_TAG_variable; } /// Return true if DbgVariable is artificial. bool isArtificial() const { if (Var->isArtificial()) return true; if (getType()->isArtificial()) return true; return false; } bool isObjectPointer() const { if (Var->isObjectPointer()) return true; if (getType()->isObjectPointer()) return true; return false; } bool hasComplexAddress() const { assert(MInsn && "Expected DBG_VALUE, not MMI variable"); assert(FrameIndex.empty() && "Expected DBG_VALUE, not MMI variable"); assert( (Expr.empty() || (Expr.size() == 1 && Expr.back()->getNumElements())) && "Invalid Expr for DBG_VALUE"); return !Expr.empty(); } bool isBlockByrefVariable() const; const DIType *getType() const; private: /// Look in the DwarfDebug map for the MDNode that /// corresponds to the reference. template <typename T> T *resolve(TypedDINodeRef<T> Ref) const; }; /// Helper used to pair up a symbol and its DWARF compile unit. struct SymbolCU { SymbolCU(DwarfCompileUnit *CU, const MCSymbol *Sym) : Sym(Sym), CU(CU) {} const MCSymbol *Sym; DwarfCompileUnit *CU; }; /// Collects and handles dwarf debug information. 
class DwarfDebug : public AsmPrinterHandler { /// Target of Dwarf emission. AsmPrinter *Asm; /// Collected machine module information. MachineModuleInfo *MMI; /// All DIEValues are allocated through this allocator. BumpPtrAllocator DIEValueAllocator; /// Maps MDNode with its corresponding DwarfCompileUnit. MapVector<const MDNode *, DwarfCompileUnit *> CUMap; /// Maps subprogram MDNode with its corresponding DwarfCompileUnit. MapVector<const MDNode *, DwarfCompileUnit *> SPMap; /// Maps a CU DIE with its corresponding DwarfCompileUnit. DenseMap<const DIE *, DwarfCompileUnit *> CUDieMap; /// List of all labels used in aranges generation. std::vector<SymbolCU> ArangeLabels; /// Size of each symbol emitted (for those symbols that have a specific size). DenseMap<const MCSymbol *, uint64_t> SymSize; LexicalScopes LScopes; /// Collection of abstract variables. DenseMap<const MDNode *, std::unique_ptr<DbgVariable>> AbstractVariables; SmallVector<std::unique_ptr<DbgVariable>, 64> ConcreteVariables; /// Collection of DebugLocEntry. Stored in a linked list so that DIELocLists /// can refer to them in spite of insertions into this list. DebugLocStream DebugLocs; /// This is a collection of subprogram MDNodes that are processed to /// create DIEs. SmallPtrSet<const MDNode *, 16> ProcessedSPNodes; /// Maps instruction with label emitted before instruction. DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn; /// Maps instruction with label emitted after instruction. DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn; /// History of DBG_VALUE and clobber instructions for each user /// variable. Variables are listed in order of appearance. DbgValueHistoryMap DbgValues; /// Previous instruction's location information. This is used to /// determine label location to indicate scope boundries in dwarf /// debug info. DebugLoc PrevInstLoc; MCSymbol *PrevLabel; /// This location indicates end of function prologue and beginning of /// function body. DebugLoc PrologEndLoc; /// If nonnull, stores the current machine function we're processing. const MachineFunction *CurFn; /// If nonnull, stores the current machine instruction we're processing. const MachineInstr *CurMI; /// If nonnull, stores the CU in which the previous subprogram was contained. const DwarfCompileUnit *PrevCU; /// As an optimization, there is no need to emit an entry in the directory /// table for the same directory as DW_AT_comp_dir. StringRef CompilationDir; /// Holder for the file specific debug information. DwarfFile InfoHolder; /// Holders for the various debug information flags that we might need to /// have exposed. See accessor functions below for description. /// Holder for imported entities. typedef SmallVector<std::pair<const MDNode *, const MDNode *>, 32> ImportedEntityMap; ImportedEntityMap ScopesWithImportedEntities; /// Map from MDNodes for user-defined types to the type units that /// describe them. DenseMap<const MDNode *, const DwarfTypeUnit *> DwarfTypeUnits; SmallVector< std::pair<std::unique_ptr<DwarfTypeUnit>, const DICompositeType *>, 1> TypeUnitsUnderConstruction; /// Whether to emit the pubnames/pubtypes sections. bool HasDwarfPubSections; /// Whether or not to use AT_ranges for compilation units. bool HasCURanges; /// Whether we emitted a function into a section other than the /// default text. bool UsedNonDefaultText; /// Whether to use the GNU TLS opcode (instead of the standard opcode). bool UseGNUTLSOpcode; /// Version of dwarf we're emitting. 
unsigned DwarfVersion; /// Maps from a type identifier to the actual MDNode. DITypeIdentifierMap TypeIdentifierMap; /// DWARF5 Experimental Options /// @{ bool HasDwarfAccelTables; bool HasSplitDwarf; /// Separated Dwarf Variables /// In general these will all be for bits that are left in the /// original object file, rather than things that are meant /// to be in the .dwo sections. /// Holder for the skeleton information. DwarfFile SkeletonHolder; /// Store file names for type units under fission in a line table /// header that will be emitted into debug_line.dwo. // FIXME: replace this with a map from comp_dir to table so that we // can emit multiple tables during LTO each of which uses directory // 0, referencing the comp_dir of all the type units that use it. MCDwarfDwoLineTable SplitTypeUnitFileTable; /// @} /// True iff there are multiple CUs in this module. bool SingleCU; bool IsDarwin; bool IsPS4; AddressPool AddrPool; DwarfAccelTable AccelNames; DwarfAccelTable AccelObjC; DwarfAccelTable AccelNamespace; DwarfAccelTable AccelTypes; DenseMap<const Function *, DISubprogram *> FunctionDIs; MCDwarfDwoLineTable *getDwoLineTable(const DwarfCompileUnit &); const SmallVectorImpl<std::unique_ptr<DwarfUnit>> &getUnits() { return InfoHolder.getUnits(); } typedef DbgValueHistoryMap::InlinedVariable InlinedVariable; /// Find abstract variable associated with Var. DbgVariable *getExistingAbstractVariable(InlinedVariable IV, const DILocalVariable *&Cleansed); DbgVariable *getExistingAbstractVariable(InlinedVariable IV); void createAbstractVariable(const DILocalVariable *DV, LexicalScope *Scope); void ensureAbstractVariableIsCreated(InlinedVariable Var, const MDNode *Scope); void ensureAbstractVariableIsCreatedIfScoped(InlinedVariable Var, const MDNode *Scope); DbgVariable *createConcreteVariable(LexicalScope &Scope, InlinedVariable IV); /// Construct a DIE for this abstract scope. void constructAbstractSubprogramScopeDIE(LexicalScope *Scope); /// Compute the size and offset of a DIE given an incoming Offset. unsigned computeSizeAndOffset(DIE *Die, unsigned Offset); /// Compute the size and offset of all the DIEs. void computeSizeAndOffsets(); /// Collect info for variables that were optimized out. void collectDeadVariables(); void finishVariableDefinitions(); void finishSubprogramDefinitions(); /// Finish off debug information after all functions have been /// processed. void finalizeModuleInfo(); /// Emit the debug info section. void emitDebugInfo(); /// Emit the abbreviation section. void emitAbbreviations(); /// Emit a specified accelerator table. void emitAccel(DwarfAccelTable &Accel, MCSection *Section, StringRef TableName); /// Emit visible names into a hashed accelerator table section. void emitAccelNames(); /// Emit objective C classes and categories into a hashed /// accelerator table section. void emitAccelObjC(); /// Emit namespace dies into a hashed accelerator table. void emitAccelNamespaces(); /// Emit type dies into a hashed accelerator table. void emitAccelTypes(); /// Emit visible names into a debug pubnames section. /// \param GnuStyle determines whether or not we want to emit /// additional information into the table ala newer gcc for gdb /// index. void emitDebugPubNames(bool GnuStyle = false); /// Emit visible types into a debug pubtypes section. /// \param GnuStyle determines whether or not we want to emit /// additional information into the table ala newer gcc for gdb /// index. 
void emitDebugPubTypes(bool GnuStyle = false); void emitDebugPubSection( bool GnuStyle, MCSection *PSec, StringRef Name, const StringMap<const DIE *> &(DwarfCompileUnit::*Accessor)() const); /// Emit visible names into a debug str section. void emitDebugStr(); /// Emit visible names into a debug loc section. void emitDebugLoc(); /// Emit visible names into a debug loc dwo section. void emitDebugLocDWO(); /// Emit visible names into a debug aranges section. void emitDebugARanges(); /// Emit visible names into a debug ranges section. void emitDebugRanges(); /// Emit inline info using custom format. void emitDebugInlineInfo(); /// DWARF 5 Experimental Split Dwarf Emitters /// Initialize common features of skeleton units. void initSkeletonUnit(const DwarfUnit &U, DIE &Die, std::unique_ptr<DwarfUnit> NewU); /// Construct the split debug info compile unit for the debug info /// section. DwarfCompileUnit &constructSkeletonCU(const DwarfCompileUnit &CU); /// Construct the split debug info compile unit for the debug info /// section. DwarfTypeUnit &constructSkeletonTU(DwarfTypeUnit &TU); /// Emit the debug info dwo section. void emitDebugInfoDWO(); /// Emit the debug abbrev dwo section. void emitDebugAbbrevDWO(); /// Emit the debug line dwo section. void emitDebugLineDWO(); /// Emit the debug str dwo section. void emitDebugStrDWO(); /// Flags to let the linker know we have emitted new style pubnames. Only /// emit it here if we don't have a skeleton CU for split dwarf. void addGnuPubAttributes(DwarfUnit &U, DIE &D) const; /// Create new DwarfCompileUnit for the given metadata node with tag /// DW_TAG_compile_unit. DwarfCompileUnit &constructDwarfCompileUnit(const DICompileUnit *DIUnit); /// Construct imported_module or imported_declaration DIE. void constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU, const DIImportedEntity *N); /// Register a source line with debug info. Returns the unique /// label that was emitted and which provides correspondence to the /// source line list. void recordSourceLine(unsigned Line, unsigned Col, const MDNode *Scope, unsigned Flags); /// Indentify instructions that are marking the beginning of or /// ending of a scope. void identifyScopeMarkers(); /// Populate LexicalScope entries with variables' info. void collectVariableInfo(DwarfCompileUnit &TheCU, const DISubprogram *SP, DenseSet<InlinedVariable> &ProcessedVars); /// Build the location list for all DBG_VALUEs in the /// function that describe the same variable. void buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc, const DbgValueHistoryMap::InstrRanges &Ranges); /// Collect variable information from the side table maintained /// by MMI. void collectVariableInfoFromMMITable(DenseSet<InlinedVariable> &P); /// Ensure that a label will be emitted before MI. void requestLabelBeforeInsn(const MachineInstr *MI) { LabelsBeforeInsn.insert(std::make_pair(MI, nullptr)); } /// Ensure that a label will be emitted after MI. void requestLabelAfterInsn(const MachineInstr *MI) { LabelsAfterInsn.insert(std::make_pair(MI, nullptr)); } public: //===--------------------------------------------------------------------===// // Main entry points. // DwarfDebug(AsmPrinter *A, Module *M); ~DwarfDebug() override; /// Emit all Dwarf sections that should come prior to the /// content. void beginModule(); /// Emit all Dwarf sections that should come after the content. void endModule() override; /// Gather pre-function debug information. 
void beginFunction(const MachineFunction *MF) override; /// Gather and emit post-function debug information. void endFunction(const MachineFunction *MF) override; /// Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override; /// Process end of an instruction. void endInstruction() override; /// Add a DIE to the set of types that we're going to pull into /// type units. void addDwarfTypeUnitType(DwarfCompileUnit &CU, StringRef Identifier, DIE &Die, const DICompositeType *CTy); /// Add a label so that arange data can be generated for it. void addArangeLabel(SymbolCU SCU) { ArangeLabels.push_back(SCU); } /// For symbols that have a size designated (e.g. common symbols), /// this tracks that size. void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override { SymSize[Sym] = Size; } /// Returns whether to use DW_OP_GNU_push_tls_address, instead of the /// standard DW_OP_form_tls_address opcode bool useGNUTLSOpcode() const { return UseGNUTLSOpcode; } // Experimental DWARF5 features. /// Returns whether or not to emit tables that dwarf consumers can /// use to accelerate lookup. bool useDwarfAccelTables() const { return HasDwarfAccelTables; } /// Returns whether or not to change the current debug info for the /// split dwarf proposal support. bool useSplitDwarf() const { return HasSplitDwarf; } /// Returns the Dwarf Version. unsigned getDwarfVersion() const { return DwarfVersion; } /// Returns the previous CU that was being updated const DwarfCompileUnit *getPrevCU() const { return PrevCU; } void setPrevCU(const DwarfCompileUnit *PrevCU) { this->PrevCU = PrevCU; } /// Returns the entries for the .debug_loc section. const DebugLocStream &getDebugLocs() const { return DebugLocs; } /// Emit an entry for the debug loc section. This can be used to /// handle an entry that's going to be emitted into the debug loc section. void emitDebugLocEntry(ByteStreamer &Streamer, const DebugLocStream::Entry &Entry); /// Emit the location for a debug loc entry, including the size header. void emitDebugLocEntryLocation(const DebugLocStream::Entry &Entry); /// Find the MDNode for the given reference. template <typename T> T *resolve(TypedDINodeRef<T> Ref) const { return Ref.resolve(TypeIdentifierMap); } /// Return the TypeIdentifierMap. const DITypeIdentifierMap &getTypeIdentifierMap() const { return TypeIdentifierMap; } /// Find the DwarfCompileUnit for the given CU Die. DwarfCompileUnit *lookupUnit(const DIE *CU) const { return CUDieMap.lookup(CU); } /// isSubprogramContext - Return true if Context is either a subprogram /// or another context nested inside a subprogram. bool isSubprogramContext(const MDNode *Context); void addSubprogramNames(const DISubprogram *SP, DIE &Die); AddressPool &getAddressPool() { return AddrPool; } void addAccelName(StringRef Name, const DIE &Die); void addAccelObjC(StringRef Name, const DIE &Die); void addAccelNamespace(StringRef Name, const DIE &Die); void addAccelType(StringRef Name, const DIE &Die, char Flags); const MachineFunction *getCurrentFunction() const { return CurFn; } iterator_range<ImportedEntityMap::const_iterator> findImportedEntitiesForScope(const MDNode *Scope) const { return make_range(std::equal_range( ScopesWithImportedEntities.begin(), ScopesWithImportedEntities.end(), std::pair<const MDNode *, const MDNode *>(Scope, nullptr), less_first())); } /// A helper function to check whether the DIE for a given Scope is /// going to be null. bool isLexicalScopeDIENull(LexicalScope *Scope); /// Return Label preceding the instruction. 
MCSymbol *getLabelBeforeInsn(const MachineInstr *MI); /// Return Label immediately following the instruction. MCSymbol *getLabelAfterInsn(const MachineInstr *MI); // FIXME: Sink these functions down into DwarfFile/Dwarf*Unit. SmallPtrSet<const MDNode *, 16> &getProcessedSPNodes() { return ProcessedSPNodes; } }; } // End of namespace llvm #endif
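// DbgVariable (declared above) has two mutually exclusive location paths:
// frame-index entries from the MMI side table (initializeMMI, later merged
// with addMMIEntry) or a single DBG_VALUE instruction (initializeDbgValue).
// A minimal sketch of the MMI path, with hypothetical LocalVar / PieceExpr /
// FrameIdx values purely for illustration (addMMIEntry asserts that every
// merged expression is a bit-piece expression):
//
//   DbgVariable V(LocalVar, /*InlinedAt=*/nullptr, DD);  // no location yet
//   V.initializeMMI(PieceExpr, FrameIdx);                // stack-slot backed
//   DbgVariable Other(LocalVar, /*InlinedAt=*/nullptr, DD);
//   Other.initializeMMI(OtherPieceExpr, OtherFrameIdx);
//   V.addMMIEntry(Other);  // appends Other's expressions and frame indices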
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
//===-- llvm/CodeGen/DwarfDebug.cpp - Dwarf Debug Framework ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf debug info into asm files. // //===----------------------------------------------------------------------===// #include "DwarfDebug.h" #include "ByteStreamer.h" #include "DIEHash.h" #include "DebugLocEntry.h" #include "DwarfCompileUnit.h" #include "DwarfExpression.h" #include "DwarfUnit.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" #include "llvm/CodeGen/DIE.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/ValueHandle.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/MD5.h" #include "llvm/Support/Path.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; #define DEBUG_TYPE "dwarfdebug" static cl::opt<bool> DisableDebugInfoPrinting("disable-debug-info-print", cl::Hidden, cl::desc("Disable debug info printing")); static cl::opt<bool> UnknownLocations( "use-unknown-locations", cl::Hidden, cl::desc("Make an absence of debug location information explicit."), cl::init(false)); static cl::opt<bool> GenerateGnuPubSections("generate-gnu-dwarf-pub-sections", cl::Hidden, cl::desc("Generate GNU-style pubnames and pubtypes"), cl::init(false)); static cl::opt<bool> GenerateARangeSection("generate-arange-section", cl::Hidden, cl::desc("Generate dwarf aranges"), cl::init(false)); namespace { enum DefaultOnOff { Default, Enable, Disable }; } static cl::opt<DefaultOnOff> DwarfAccelTables("dwarf-accel-tables", cl::Hidden, cl::desc("Output prototype dwarf accelerator tables."), cl::values(clEnumVal(Default, "Default for platform"), clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled"), clEnumValEnd), cl::init(Default)); static cl::opt<DefaultOnOff> SplitDwarf("split-dwarf", cl::Hidden, cl::desc("Output DWARF5 split debug info."), cl::values(clEnumVal(Default, "Default for platform"), clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled"), clEnumValEnd), cl::init(Default)); static cl::opt<DefaultOnOff> DwarfPubSections("generate-dwarf-pub-sections", cl::Hidden, cl::desc("Generate DWARF pubnames and pubtypes sections"), cl::values(clEnumVal(Default, "Default for platform"), clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled"), clEnumValEnd), cl::init(Default)); static const char *const DWARFGroupName = "DWARF Emission"; static const char *const DbgTimerName = 
"DWARF Debug Writer"; void DebugLocDwarfExpression::EmitOp(uint8_t Op, const char *Comment) { BS.EmitInt8( Op, Comment ? Twine(Comment) + " " + dwarf::OperationEncodingString(Op) : dwarf::OperationEncodingString(Op)); } void DebugLocDwarfExpression::EmitSigned(int64_t Value) { BS.EmitSLEB128(Value, Twine(Value)); } void DebugLocDwarfExpression::EmitUnsigned(uint64_t Value) { BS.EmitULEB128(Value, Twine(Value)); } bool DebugLocDwarfExpression::isFrameRegister(unsigned MachineReg) { // This information is not available while emitting .debug_loc entries. return false; } //===----------------------------------------------------------------------===// /// resolve - Look in the DwarfDebug map for the MDNode that /// corresponds to the reference. template <typename T> T *DbgVariable::resolve(TypedDINodeRef<T> Ref) const { return DD->resolve(Ref); } bool DbgVariable::isBlockByrefVariable() const { assert(Var && "Invalid complex DbgVariable!"); return Var->getType() .resolve(DD->getTypeIdentifierMap()) ->isBlockByrefStruct(); } const DIType *DbgVariable::getType() const { DIType *Ty = Var->getType().resolve(DD->getTypeIdentifierMap()); // FIXME: isBlockByrefVariable should be reformulated in terms of complex // addresses instead. if (Ty->isBlockByrefStruct()) { /* Byref variables, in Blocks, are declared by the programmer as "SomeType VarName;", but the compiler creates a __Block_byref_x_VarName struct, and gives the variable VarName either the struct, or a pointer to the struct, as its type. This is necessary for various behind-the-scenes things the compiler needs to do with by-reference variables in blocks. However, as far as the original *programmer* is concerned, the variable should still have type 'SomeType', as originally declared. The following function dives into the __Block_byref_x_VarName struct to find the original type of the variable. This will be passed back to the code generating the type for the Debug Information Entry for the variable 'VarName'. 'VarName' will then have the original type 'SomeType' in its debug information. The original type 'SomeType' will be the type of the field named 'VarName' inside the __Block_byref_x_VarName struct. NOTE: In order for this to not completely fail on the debugger side, the Debug Information Entry for the variable VarName needs to have a DW_AT_location that tells the debugger how to unwind through the pointers and __Block_byref_x_VarName struct to find the actual value of the variable. The function addBlockByrefType does this. 
*/ DIType *subType = Ty; uint16_t tag = Ty->getTag(); if (tag == dwarf::DW_TAG_pointer_type) subType = resolve(cast<DIDerivedType>(Ty)->getBaseType()); auto Elements = cast<DICompositeTypeBase>(subType)->getElements(); for (unsigned i = 0, N = Elements.size(); i < N; ++i) { auto *DT = cast<DIDerivedTypeBase>(Elements[i]); if (getName() == DT->getName()) return resolve(DT->getBaseType()); } } return Ty; } static LLVM_CONSTEXPR DwarfAccelTable::Atom TypeAtoms[] = { DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4), DwarfAccelTable::Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2), DwarfAccelTable::Atom(dwarf::DW_ATOM_type_flags, dwarf::DW_FORM_data1)}; DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M) : Asm(A), MMI(Asm->MMI), DebugLocs(A->OutStreamer->isVerboseAsm()), PrevLabel(nullptr), InfoHolder(A, "info_string", DIEValueAllocator), UsedNonDefaultText(false), SkeletonHolder(A, "skel_string", DIEValueAllocator), IsDarwin(Triple(A->getTargetTriple()).isOSDarwin()), IsPS4(Triple(A->getTargetTriple()).isPS4()), AccelNames(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)), AccelObjC(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)), AccelNamespace(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)), AccelTypes(TypeAtoms) { CurFn = nullptr; CurMI = nullptr; // Turn on accelerator tables for Darwin by default, pubnames by // default for non-Darwin/PS4, and handle split dwarf. if (DwarfAccelTables == Default) HasDwarfAccelTables = IsDarwin; else HasDwarfAccelTables = DwarfAccelTables == Enable; if (SplitDwarf == Default) HasSplitDwarf = false; else HasSplitDwarf = SplitDwarf == Enable; if (DwarfPubSections == Default) HasDwarfPubSections = !IsDarwin && !IsPS4; else HasDwarfPubSections = DwarfPubSections == Enable; unsigned DwarfVersionNumber = Asm->TM.Options.MCOptions.DwarfVersion; DwarfVersion = DwarfVersionNumber ? DwarfVersionNumber : MMI->getModule()->getDwarfVersion(); // Darwin and PS4 use the standard TLS opcode (defined in DWARF 3). // Everybody else uses GNU's. UseGNUTLSOpcode = !(IsDarwin || IsPS4) || DwarfVersion < 3; Asm->OutStreamer->getContext().setDwarfVersion(DwarfVersion); { NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled); beginModule(); } } // Define out of line so we don't have to include DwarfUnit.h in DwarfDebug.h. DwarfDebug::~DwarfDebug() { } static bool isObjCClass(StringRef Name) { return Name.startswith("+") || Name.startswith("-"); } static bool hasObjCCategory(StringRef Name) { if (!isObjCClass(Name)) return false; return Name.find(") ") != StringRef::npos; } static void getObjCClassCategory(StringRef In, StringRef &Class, StringRef &Category) { if (!hasObjCCategory(In)) { Class = In.slice(In.find('[') + 1, In.find(' ')); Category = ""; return; } Class = In.slice(In.find('[') + 1, In.find('(')); Category = In.slice(In.find('[') + 1, In.find(' ')); return; } static StringRef getObjCMethodName(StringRef In) { return In.slice(In.find(' ') + 1, In.find(']')); } // Add the various names to the Dwarf accelerator table names. // TODO: Determine whether or not we should add names for programs // that do not have a DW_AT_name or DW_AT_linkage_name field - this // is only slightly different than the lookup of non-standard ObjC names. 
void DwarfDebug::addSubprogramNames(const DISubprogram *SP, DIE &Die) { if (!SP->isDefinition()) return; addAccelName(SP->getName(), Die); // If the linkage name is different than the name, go ahead and output // that as well into the name table. if (SP->getLinkageName() != "" && SP->getName() != SP->getLinkageName()) addAccelName(SP->getLinkageName(), Die); // If this is an Objective-C selector name add it to the ObjC accelerator // too. if (isObjCClass(SP->getName())) { StringRef Class, Category; getObjCClassCategory(SP->getName(), Class, Category); addAccelObjC(Class, Die); if (Category != "") addAccelObjC(Category, Die); // Also add the base method name to the name table. addAccelName(getObjCMethodName(SP->getName()), Die); } } /// isSubprogramContext - Return true if Context is either a subprogram /// or another context nested inside a subprogram. bool DwarfDebug::isSubprogramContext(const MDNode *Context) { if (!Context) return false; if (isa<DISubprogram>(Context)) return true; if (auto *T = dyn_cast<DIType>(Context)) return isSubprogramContext(resolve(T->getScope())); return false; } /// Check whether we should create a DIE for the given Scope, return true /// if we don't create a DIE (the corresponding DIE is null). bool DwarfDebug::isLexicalScopeDIENull(LexicalScope *Scope) { if (Scope->isAbstractScope()) return false; // We don't create a DIE if there is no Range. const SmallVectorImpl<InsnRange> &Ranges = Scope->getRanges(); if (Ranges.empty()) return true; if (Ranges.size() > 1) return false; // We don't create a DIE if we have a single Range and the end label // is null. return !getLabelAfterInsn(Ranges.front().second); } template <typename Func> void forBothCUs(DwarfCompileUnit &CU, Func F) { F(CU); if (auto *SkelCU = CU.getSkeleton()) F(*SkelCU); } void DwarfDebug::constructAbstractSubprogramScopeDIE(LexicalScope *Scope) { assert(Scope && Scope->getScopeNode()); assert(Scope->isAbstractScope()); assert(!Scope->getInlinedAt()); const MDNode *SP = Scope->getScopeNode(); ProcessedSPNodes.insert(SP); // Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram // was inlined from another compile unit. auto &CU = SPMap[SP]; forBothCUs(*CU, [&](DwarfCompileUnit &CU) { CU.constructAbstractSubprogramScopeDIE(Scope); }); } void DwarfDebug::addGnuPubAttributes(DwarfUnit &U, DIE &D) const { if (!GenerateGnuPubSections) return; U.addFlag(D, dwarf::DW_AT_GNU_pubnames); } // Create new DwarfCompileUnit for the given metadata node with tag // DW_TAG_compile_unit. DwarfCompileUnit & DwarfDebug::constructDwarfCompileUnit(const DICompileUnit *DIUnit) { StringRef FN = DIUnit->getFilename(); CompilationDir = DIUnit->getDirectory(); auto OwnedUnit = make_unique<DwarfCompileUnit>( InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder); DwarfCompileUnit &NewCU = *OwnedUnit; DIE &Die = NewCU.getUnitDie(); InfoHolder.addUnit(std::move(OwnedUnit)); if (useSplitDwarf()) NewCU.setSkeleton(constructSkeletonCU(NewCU)); // LTO with assembly output shares a single line table amongst multiple CUs. // To avoid the compilation directory being ambiguous, let the line table // explicitly describe the directory of all files, never relying on the // compilation directory. 
if (!Asm->OutStreamer->hasRawTextSupport() || SingleCU) Asm->OutStreamer->getContext().setMCLineTableCompilationDir( NewCU.getUniqueID(), CompilationDir); NewCU.addString(Die, dwarf::DW_AT_producer, DIUnit->getProducer()); NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2, DIUnit->getSourceLanguage()); NewCU.addString(Die, dwarf::DW_AT_name, FN); if (!useSplitDwarf()) { NewCU.initStmtList(); // If we're using split dwarf the compilation dir is going to be in the // skeleton CU and so we don't need to duplicate it here. if (!CompilationDir.empty()) NewCU.addString(Die, dwarf::DW_AT_comp_dir, CompilationDir); addGnuPubAttributes(NewCU, Die); } if (DIUnit->isOptimized()) NewCU.addFlag(Die, dwarf::DW_AT_APPLE_optimized); StringRef Flags = DIUnit->getFlags(); if (!Flags.empty()) NewCU.addString(Die, dwarf::DW_AT_APPLE_flags, Flags); if (unsigned RVer = DIUnit->getRuntimeVersion()) NewCU.addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers, dwarf::DW_FORM_data1, RVer); if (useSplitDwarf()) NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoDWOSection()); else NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoSection()); CUMap.insert(std::make_pair(DIUnit, &NewCU)); CUDieMap.insert(std::make_pair(&Die, &NewCU)); return NewCU; } void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU, const DIImportedEntity *N) { if (DIE *D = TheCU.getOrCreateContextDIE(N->getScope())) D->addChild(TheCU.constructImportedEntityDIE(N)); } // Emit all Dwarf sections that should come prior to the content. Create // global DIEs and emit initial debug info sections. This is invoked by // the target AsmPrinter. void DwarfDebug::beginModule() { if (DisableDebugInfoPrinting) return; const Module *M = MMI->getModule(); FunctionDIs = makeSubprogramMap(*M); NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu"); if (!CU_Nodes) return; TypeIdentifierMap = generateDITypeIdentifierMap(CU_Nodes); SingleCU = CU_Nodes->getNumOperands() == 1; for (MDNode *N : CU_Nodes->operands()) { auto *CUNode = cast<DICompileUnit>(N); DwarfCompileUnit &CU = constructDwarfCompileUnit(CUNode); for (auto *IE : CUNode->getImportedEntities()) ScopesWithImportedEntities.push_back(std::make_pair(IE->getScope(), IE)); // Stable sort to preserve the order of appearance of imported entities. // This is to avoid out-of-order processing of interdependent declarations // within the same scope, e.g. { namespace A = base; namespace B = A; } std::stable_sort(ScopesWithImportedEntities.begin(), ScopesWithImportedEntities.end(), less_first()); for (auto *GV : CUNode->getGlobalVariables()) CU.getOrCreateGlobalVariableDIE(GV); for (auto *SP : CUNode->getSubprograms()) SPMap.insert(std::make_pair(SP, &CU)); for (auto *Ty : CUNode->getEnumTypes()) { // The enum types array by design contains pointers to // MDNodes rather than DIRefs. Unique them here. CU.getOrCreateTypeDIE(cast<DIType>(resolve(Ty->getRef()))); } for (auto *Ty : CUNode->getRetainedTypes()) { // The retained types array by design contains pointers to // MDNodes rather than DIRefs. Unique them here. CU.getOrCreateTypeDIE(cast<DIType>(resolve(Ty->getRef()))); } // Emit imported_modules last so that the relevant context is already // available. for (auto *IE : CUNode->getImportedEntities()) constructAndAddImportedEntityDIE(CU, IE); } // Tell MMI that we have debug info. 
MMI->setDebugInfoAvailability(true); } void DwarfDebug::finishVariableDefinitions() { for (const auto &Var : ConcreteVariables) { DIE *VariableDie = Var->getDIE(); assert(VariableDie); // FIXME: Consider the time-space tradeoff of just storing the unit pointer // in the ConcreteVariables list, rather than looking it up again here. // DIE::getUnit isn't simple - it walks parent pointers, etc. DwarfCompileUnit *Unit = lookupUnit(VariableDie->getUnit()); assert(Unit); DbgVariable *AbsVar = getExistingAbstractVariable( InlinedVariable(Var->getVariable(), Var->getInlinedAt())); if (AbsVar && AbsVar->getDIE()) { Unit->addDIEEntry(*VariableDie, dwarf::DW_AT_abstract_origin, *AbsVar->getDIE()); } else Unit->applyVariableAttributes(*Var, *VariableDie); } } void DwarfDebug::finishSubprogramDefinitions() { for (const auto &P : SPMap) forBothCUs(*P.second, [&](DwarfCompileUnit &CU) { CU.finishSubprogramDefinition(cast<DISubprogram>(P.first)); }); } // Collect info for variables that were optimized out. void DwarfDebug::collectDeadVariables() { const Module *M = MMI->getModule(); if (NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu")) { for (MDNode *N : CU_Nodes->operands()) { auto *TheCU = cast<DICompileUnit>(N); // Construct subprogram DIE and add variables DIEs. DwarfCompileUnit *SPCU = static_cast<DwarfCompileUnit *>(CUMap.lookup(TheCU)); assert(SPCU && "Unable to find Compile Unit!"); for (auto *SP : TheCU->getSubprograms()) { if (ProcessedSPNodes.count(SP) != 0) continue; SPCU->collectDeadVariables(SP); } } } } void DwarfDebug::finalizeModuleInfo() { const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); finishSubprogramDefinitions(); finishVariableDefinitions(); // Collect info for variables that were optimized out. collectDeadVariables(); // Handle anything that needs to be done on a per-unit basis after // all other generation. for (const auto &P : CUMap) { auto &TheCU = *P.second; // Emit DW_AT_containing_type attribute to connect types with their // vtable holding type. TheCU.constructContainingTypeDIEs(); // Add CU specific attributes if we need to add any. // If we're splitting the dwarf out now that we've got the entire // CU then add the dwo id to it. auto *SkCU = TheCU.getSkeleton(); if (useSplitDwarf()) { // Emit a unique identifier for this CU. uint64_t ID = DIEHash(Asm).computeCUSignature(TheCU.getUnitDie()); TheCU.addUInt(TheCU.getUnitDie(), dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8, ID); SkCU->addUInt(SkCU->getUnitDie(), dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8, ID); // We don't keep track of which addresses are used in which CU so this // is a bit pessimistic under LTO. if (!AddrPool.isEmpty()) { const MCSymbol *Sym = TLOF.getDwarfAddrSection()->getBeginSymbol(); SkCU->addSectionLabel(SkCU->getUnitDie(), dwarf::DW_AT_GNU_addr_base, Sym, Sym); } if (!SkCU->getRangeLists().empty()) { const MCSymbol *Sym = TLOF.getDwarfRangesSection()->getBeginSymbol(); SkCU->addSectionLabel(SkCU->getUnitDie(), dwarf::DW_AT_GNU_ranges_base, Sym, Sym); } } // If we have code split among multiple sections or non-contiguous // ranges of code then emit a DW_AT_ranges attribute on the unit that will // remain in the .o file, otherwise add a DW_AT_low_pc. // FIXME: We should use ranges allow reordering of code ala // .subsections_via_symbols in mach-o. This would mean turning on // ranges for all subprogram DIEs for mach-o. DwarfCompileUnit &U = SkCU ? 
*SkCU : TheCU; if (unsigned NumRanges = TheCU.getRanges().size()) { if (NumRanges > 1) // A DW_AT_low_pc attribute may also be specified in combination with // DW_AT_ranges to specify the default base address for use in // location lists (see Section 2.6.2) and range lists (see Section // 2.17.3). U.addUInt(U.getUnitDie(), dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, 0); else U.setBaseAddress(TheCU.getRanges().front().getStart()); U.attachRangesOrLowHighPC(U.getUnitDie(), TheCU.takeRanges()); } } // Compute DIE offsets and sizes. InfoHolder.computeSizeAndOffsets(); if (useSplitDwarf()) SkeletonHolder.computeSizeAndOffsets(); } // Emit all Dwarf sections that should come after the content. void DwarfDebug::endModule() { assert(CurFn == nullptr); assert(CurMI == nullptr); // If we aren't actually generating debug info (check beginModule - // conditionalized on !DisableDebugInfoPrinting and the presence of the // llvm.dbg.cu metadata node), there is nothing to emit. if (!MMI->hasDebugInfo()) return; // Finalize the debug info for the module. finalizeModuleInfo(); emitDebugStr(); if (useSplitDwarf()) emitDebugLocDWO(); else // Emit info into a debug loc section. emitDebugLoc(); // Corresponding abbreviations into an abbrev section. emitAbbreviations(); // Emit all the DIEs into a debug info section. emitDebugInfo(); // Emit info into a debug aranges section. if (GenerateARangeSection) emitDebugARanges(); // Emit info into a debug ranges section. emitDebugRanges(); if (useSplitDwarf()) { emitDebugStrDWO(); emitDebugInfoDWO(); emitDebugAbbrevDWO(); emitDebugLineDWO(); // Emit DWO addresses. AddrPool.emit(*Asm, Asm->getObjFileLowering().getDwarfAddrSection()); } // Emit info into the dwarf accelerator table sections. if (useDwarfAccelTables()) { emitAccelNames(); emitAccelObjC(); emitAccelNamespaces(); emitAccelTypes(); } // Emit the pubnames and pubtypes sections if requested. if (HasDwarfPubSections) { emitDebugPubNames(GenerateGnuPubSections); emitDebugPubTypes(GenerateGnuPubSections); } // clean up. SPMap.clear(); AbstractVariables.clear(); } // Find abstract variable, if any, associated with Var. DbgVariable * DwarfDebug::getExistingAbstractVariable(InlinedVariable IV, const DILocalVariable *&Cleansed) { // More than one inlined variable corresponds to one abstract variable.
Cleansed = IV.first; auto I = AbstractVariables.find(Cleansed); if (I != AbstractVariables.end()) return I->second.get(); return nullptr; } DbgVariable *DwarfDebug::getExistingAbstractVariable(InlinedVariable IV) { const DILocalVariable *Cleansed; return getExistingAbstractVariable(IV, Cleansed); } void DwarfDebug::createAbstractVariable(const DILocalVariable *Var, LexicalScope *Scope) { auto AbsDbgVariable = make_unique<DbgVariable>(Var, /* IA */ nullptr, this); InfoHolder.addScopeVariable(Scope, AbsDbgVariable.get()); AbstractVariables[Var] = std::move(AbsDbgVariable); } void DwarfDebug::ensureAbstractVariableIsCreated(InlinedVariable IV, const MDNode *ScopeNode) { const DILocalVariable *Cleansed = nullptr; if (getExistingAbstractVariable(IV, Cleansed)) return; createAbstractVariable(Cleansed, LScopes.getOrCreateAbstractScope( cast<DILocalScope>(ScopeNode))); } void DwarfDebug::ensureAbstractVariableIsCreatedIfScoped( InlinedVariable IV, const MDNode *ScopeNode) { const DILocalVariable *Cleansed = nullptr; if (getExistingAbstractVariable(IV, Cleansed)) return; if (LexicalScope *Scope = LScopes.findAbstractScope(cast_or_null<DILocalScope>(ScopeNode))) createAbstractVariable(Cleansed, Scope); } // Collect variable information from side table maintained by MMI. void DwarfDebug::collectVariableInfoFromMMITable( DenseSet<InlinedVariable> &Processed) { for (const auto &VI : MMI->getVariableDbgInfo()) { if (!VI.Var) continue; assert(VI.Var->isValidLocationForIntrinsic(VI.Loc) && "Expected inlined-at fields to agree"); InlinedVariable Var(VI.Var, VI.Loc->getInlinedAt()); Processed.insert(Var); LexicalScope *Scope = LScopes.findLexicalScope(VI.Loc); // If variable scope is not found then skip this variable. if (!Scope) continue; ensureAbstractVariableIsCreatedIfScoped(Var, Scope->getScopeNode()); auto RegVar = make_unique<DbgVariable>(Var.first, Var.second, this); RegVar->initializeMMI(VI.Expr, VI.Slot); if (InfoHolder.addScopeVariable(Scope, RegVar.get())) ConcreteVariables.push_back(std::move(RegVar)); } } // Get .debug_loc entry for the instruction range starting at MI. static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) { const DIExpression *Expr = MI->getDebugExpression(); assert(MI->getNumOperands() == 4); if (MI->getOperand(0).isReg()) { MachineLocation MLoc; // If the second operand is an immediate, this is a // register-indirect address. if (!MI->getOperand(1).isImm()) MLoc.set(MI->getOperand(0).getReg()); else MLoc.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm()); return DebugLocEntry::Value(Expr, MLoc); } if (MI->getOperand(0).isImm()) return DebugLocEntry::Value(Expr, MI->getOperand(0).getImm()); if (MI->getOperand(0).isFPImm()) return DebugLocEntry::Value(Expr, MI->getOperand(0).getFPImm()); if (MI->getOperand(0).isCImm()) return DebugLocEntry::Value(Expr, MI->getOperand(0).getCImm()); llvm_unreachable("Unexpected 4-operand DBG_VALUE instruction!"); } /// Determine whether two variable pieces overlap. static bool piecesOverlap(const DIExpression *P1, const DIExpression *P2) { if (!P1->isBitPiece() || !P2->isBitPiece()) return true; unsigned l1 = P1->getBitPieceOffset(); unsigned l2 = P2->getBitPieceOffset(); unsigned r1 = l1 + P1->getBitPieceSize(); unsigned r2 = l2 + P2->getBitPieceSize(); // True where [l1,r1[ and [r1,r2[ overlap. return (l1 < r2) && (l2 < r1); } /// Build the location list for all DBG_VALUEs in the function that /// describe the same variable. 
If the ranges of several independent /// pieces of the same variable overlap partially, split them up and /// combine the ranges. The resulting DebugLocEntries are will have /// strict monotonically increasing begin addresses and will never /// overlap. // // Input: // // Ranges History [var, loc, piece ofs size] // 0 | [x, (reg0, piece 0, 32)] // 1 | | [x, (reg1, piece 32, 32)] <- IsPieceOfPrevEntry // 2 | | ... // 3 | [clobber reg0] // 4 [x, (mem, piece 0, 64)] <- overlapping with both previous pieces of // x. // // Output: // // [0-1] [x, (reg0, piece 0, 32)] // [1-3] [x, (reg0, piece 0, 32), (reg1, piece 32, 32)] // [3-4] [x, (reg1, piece 32, 32)] // [4- ] [x, (mem, piece 0, 64)] void DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc, const DbgValueHistoryMap::InstrRanges &Ranges) { SmallVector<DebugLocEntry::Value, 4> OpenRanges; for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) { const MachineInstr *Begin = I->first; const MachineInstr *End = I->second; assert(Begin->isDebugValue() && "Invalid History entry"); // Check if a variable is inaccessible in this range. if (Begin->getNumOperands() > 1 && Begin->getOperand(0).isReg() && !Begin->getOperand(0).getReg()) { OpenRanges.clear(); continue; } // If this piece overlaps with any open ranges, truncate them. const DIExpression *DIExpr = Begin->getDebugExpression(); auto Last = std::remove_if(OpenRanges.begin(), OpenRanges.end(), [&](DebugLocEntry::Value R) { return piecesOverlap(DIExpr, R.getExpression()); }); OpenRanges.erase(Last, OpenRanges.end()); const MCSymbol *StartLabel = getLabelBeforeInsn(Begin); assert(StartLabel && "Forgot label before DBG_VALUE starting a range!"); const MCSymbol *EndLabel; if (End != nullptr) EndLabel = getLabelAfterInsn(End); else if (std::next(I) == Ranges.end()) EndLabel = Asm->getFunctionEnd(); else EndLabel = getLabelBeforeInsn(std::next(I)->first); assert(EndLabel && "Forgot label after instruction ending a range!"); DEBUG(dbgs() << "DotDebugLoc: " << *Begin << "\n"); auto Value = getDebugLocValue(Begin); DebugLocEntry Loc(StartLabel, EndLabel, Value); bool couldMerge = false; // If this is a piece, it may belong to the current DebugLocEntry. if (DIExpr->isBitPiece()) { // Add this value to the list of open ranges. OpenRanges.push_back(Value); // Attempt to add the piece to the last entry. if (!DebugLoc.empty()) if (DebugLoc.back().MergeValues(Loc)) couldMerge = true; } if (!couldMerge) { // Need to add a new DebugLocEntry. Add all values from still // valid non-overlapping pieces. if (OpenRanges.size()) Loc.addValues(OpenRanges); DebugLoc.push_back(std::move(Loc)); } // Attempt to coalesce the ranges of two otherwise identical // DebugLocEntries. auto CurEntry = DebugLoc.rbegin(); DEBUG({ dbgs() << CurEntry->getValues().size() << " Values:\n"; for (auto &Value : CurEntry->getValues()) Value.getExpression()->dump(); dbgs() << "-----\n"; }); auto PrevEntry = std::next(CurEntry); if (PrevEntry != DebugLoc.rend() && PrevEntry->MergeRanges(*CurEntry)) DebugLoc.pop_back(); } } DbgVariable *DwarfDebug::createConcreteVariable(LexicalScope &Scope, InlinedVariable IV) { ensureAbstractVariableIsCreatedIfScoped(IV, Scope.getScopeNode()); ConcreteVariables.push_back( make_unique<DbgVariable>(IV.first, IV.second, this)); InfoHolder.addScopeVariable(&Scope, ConcreteVariables.back().get()); return ConcreteVariables.back().get(); } // Find variables for each lexical scope. 
void DwarfDebug::collectVariableInfo(DwarfCompileUnit &TheCU, const DISubprogram *SP, DenseSet<InlinedVariable> &Processed) { // Grab the variable info that was squirreled away in the MMI side-table. collectVariableInfoFromMMITable(Processed); for (const auto &I : DbgValues) { InlinedVariable IV = I.first; if (Processed.count(IV)) continue; // Instruction ranges, specifying where IV is accessible. const auto &Ranges = I.second; if (Ranges.empty()) continue; LexicalScope *Scope = nullptr; if (const DILocation *IA = IV.second) Scope = LScopes.findInlinedScope(IV.first->getScope(), IA); else Scope = LScopes.findLexicalScope(IV.first->getScope()); // If variable scope is not found then skip this variable. if (!Scope) continue; Processed.insert(IV); DbgVariable *RegVar = createConcreteVariable(*Scope, IV); const MachineInstr *MInsn = Ranges.front().first; assert(MInsn->isDebugValue() && "History must begin with debug value"); // Check if the first DBG_VALUE is valid for the rest of the function. if (Ranges.size() == 1 && Ranges.front().second == nullptr) { RegVar->initializeDbgValue(MInsn); continue; } // Handle multiple DBG_VALUE instructions describing one variable. DebugLocStream::ListBuilder List(DebugLocs, TheCU, *Asm, *RegVar, *MInsn); // Build the location list for this variable. SmallVector<DebugLocEntry, 8> Entries; buildLocationList(Entries, Ranges); // If the variable has an DIBasicType, extract it. Basic types cannot have // unique identifiers, so don't bother resolving the type with the // identifier map. const DIBasicType *BT = dyn_cast<DIBasicType>( static_cast<const Metadata *>(IV.first->getType())); // Finalize the entry by lowering it into a DWARF bytestream. for (auto &Entry : Entries) Entry.finalize(*Asm, List, BT); } // Collect info for variables that were optimized out. for (const DILocalVariable *DV : SP->getVariables()) { if (Processed.insert(InlinedVariable(DV, nullptr)).second) if (LexicalScope *Scope = LScopes.findLexicalScope(DV->getScope())) createConcreteVariable(*Scope, InlinedVariable(DV, nullptr)); } } // Return Label preceding the instruction. MCSymbol *DwarfDebug::getLabelBeforeInsn(const MachineInstr *MI) { MCSymbol *Label = LabelsBeforeInsn.lookup(MI); assert(Label && "Didn't insert label before instruction"); return Label; } // Return Label immediately following the instruction. MCSymbol *DwarfDebug::getLabelAfterInsn(const MachineInstr *MI) { return LabelsAfterInsn.lookup(MI); } // Process beginning of an instruction. void DwarfDebug::beginInstruction(const MachineInstr *MI) { assert(CurMI == nullptr); CurMI = MI; // Check if source location changes, but ignore DBG_VALUE locations. if (!MI->isDebugValue()) { DebugLoc DL = MI->getDebugLoc(); if (DL != PrevInstLoc) { if (DL) { unsigned Flags = 0; PrevInstLoc = DL; if (DL == PrologEndLoc) { Flags |= DWARF2_FLAG_PROLOGUE_END; PrologEndLoc = DebugLoc(); Flags |= DWARF2_FLAG_IS_STMT; } if (DL.getLine() != Asm->OutStreamer->getContext().getCurrentDwarfLoc().getLine()) Flags |= DWARF2_FLAG_IS_STMT; const MDNode *Scope = DL.getScope(); recordSourceLine(DL.getLine(), DL.getCol(), Scope, Flags); } else if (UnknownLocations) { PrevInstLoc = DL; recordSourceLine(0, 0, nullptr, 0); } } } // Insert labels where requested. DenseMap<const MachineInstr *, MCSymbol *>::iterator I = LabelsBeforeInsn.find(MI); // No label needed. if (I == LabelsBeforeInsn.end()) return; // Label already assigned. 
if (I->second) return; if (!PrevLabel) { PrevLabel = MMI->getContext().createTempSymbol(); Asm->OutStreamer->EmitLabel(PrevLabel); } I->second = PrevLabel; } // Process end of an instruction. void DwarfDebug::endInstruction() { assert(CurMI != nullptr); // Don't create a new label after DBG_VALUE instructions. // They don't generate code. if (!CurMI->isDebugValue()) PrevLabel = nullptr; DenseMap<const MachineInstr *, MCSymbol *>::iterator I = LabelsAfterInsn.find(CurMI); CurMI = nullptr; // No label needed. if (I == LabelsAfterInsn.end()) return; // Label already assigned. if (I->second) return; // We need a label after this instruction. if (!PrevLabel) { PrevLabel = MMI->getContext().createTempSymbol(); Asm->OutStreamer->EmitLabel(PrevLabel); } I->second = PrevLabel; } // Each LexicalScope has first instruction and last instruction to mark // beginning and end of a scope respectively. Create an inverse map that list // scopes starts (and ends) with an instruction. One instruction may start (or // end) multiple scopes. Ignore scopes that are not reachable. void DwarfDebug::identifyScopeMarkers() { SmallVector<LexicalScope *, 4> WorkList; WorkList.push_back(LScopes.getCurrentFunctionScope()); while (!WorkList.empty()) { LexicalScope *S = WorkList.pop_back_val(); const SmallVectorImpl<LexicalScope *> &Children = S->getChildren(); if (!Children.empty()) WorkList.append(Children.begin(), Children.end()); if (S->isAbstractScope()) continue; for (const InsnRange &R : S->getRanges()) { assert(R.first && "InsnRange does not have first instruction!"); assert(R.second && "InsnRange does not have second instruction!"); requestLabelBeforeInsn(R.first); requestLabelAfterInsn(R.second); } } } static DebugLoc findPrologueEndLoc(const MachineFunction *MF) { // First known non-DBG_VALUE and non-frame setup location marks // the beginning of the function body. for (const auto &MBB : *MF) for (const auto &MI : MBB) if (!MI.isDebugValue() && !MI.getFlag(MachineInstr::FrameSetup) && MI.getDebugLoc()) { // Did the target forget to set the FrameSetup flag for CFI insns? assert(!MI.isCFIInstruction() && "First non-frame-setup instruction is a CFI instruction."); return MI.getDebugLoc(); } return DebugLoc(); } // Gather pre-function debug information. Assumes being called immediately // after the function entry point has been emitted. void DwarfDebug::beginFunction(const MachineFunction *MF) { CurFn = MF; // If there's no debug info for the function we're not going to do anything. if (!MMI->hasDebugInfo()) return; auto DI = FunctionDIs.find(MF->getFunction()); if (DI == FunctionDIs.end()) return; // Grab the lexical scopes for the function, if we don't have any of those // then we're not going to be able to do anything. LScopes.initialize(*MF); if (LScopes.empty()) return; assert(DbgValues.empty() && "DbgValues map wasn't cleaned!"); // Make sure that each lexical scope will have a begin/end label. identifyScopeMarkers(); // Set DwarfDwarfCompileUnitID in MCContext to the Compile Unit this function // belongs to so that we add to the correct per-cu line table in the // non-asm case. 
LexicalScope *FnScope = LScopes.getCurrentFunctionScope(); // FnScope->getScopeNode() and DI->second should represent the same function, // though they may not be the same MDNode due to inline functions merged in // LTO where the debug info metadata still differs (either due to distinct // written differences - two versions of a linkonce_odr function // written/copied into two separate files, or some sub-optimal metadata that // isn't structurally identical (see: file path/name info from clang, which // includes the directory of the cpp file being built, even when the file name // is absolute (such as an <> lookup header))) DwarfCompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode()); assert(TheCU && "Unable to find compile unit!"); if (Asm->OutStreamer->hasRawTextSupport()) // Use a single line table if we are generating assembly. Asm->OutStreamer->getContext().setDwarfCompileUnitID(0); else Asm->OutStreamer->getContext().setDwarfCompileUnitID(TheCU->getUniqueID()); // Calculate history for local variables. calculateDbgValueHistory(MF, Asm->MF->getSubtarget().getRegisterInfo(), DbgValues); // Request labels for the full history. for (const auto &I : DbgValues) { const auto &Ranges = I.second; if (Ranges.empty()) continue; // The first mention of a function argument gets the CurrentFnBegin // label, so arguments are visible when breaking at function entry. const DILocalVariable *DIVar = Ranges.front().first->getDebugVariable(); if (DIVar->getTag() == dwarf::DW_TAG_arg_variable && getDISubprogram(DIVar->getScope())->describes(MF->getFunction())) { LabelsBeforeInsn[Ranges.front().first] = Asm->getFunctionBegin(); if (Ranges.front().first->getDebugExpression()->isBitPiece()) { // Mark all non-overlapping initial pieces. for (auto I = Ranges.begin(); I != Ranges.end(); ++I) { const DIExpression *Piece = I->first->getDebugExpression(); if (std::all_of(Ranges.begin(), I, [&](DbgValueHistoryMap::InstrRange Pred) { return !piecesOverlap(Piece, Pred.first->getDebugExpression()); })) LabelsBeforeInsn[I->first] = Asm->getFunctionBegin(); else break; } } } for (const auto &Range : Ranges) { requestLabelBeforeInsn(Range.first); if (Range.second) requestLabelAfterInsn(Range.second); } } PrevInstLoc = DebugLoc(); PrevLabel = Asm->getFunctionBegin(); // Record beginning of function. PrologEndLoc = findPrologueEndLoc(MF); if (DILocation *L = PrologEndLoc) { // We'd like to list the prologue as "not statements" but GDB behaves // poorly if we do that. Revisit this with caution/GDB (7.5+) testing. auto *SP = L->getInlinedAtScope()->getSubprogram(); recordSourceLine(SP->getScopeLine(), 0, SP, DWARF2_FLAG_IS_STMT); } } // Gather and emit post-function debug information. void DwarfDebug::endFunction(const MachineFunction *MF) { assert(CurFn == MF && "endFunction should be called with the same function as beginFunction"); if (!MMI->hasDebugInfo() || LScopes.empty() || !FunctionDIs.count(MF->getFunction())) { // If we don't have a lexical scope for this function then there will // be a hole in the range information. Keep note of this by setting the // previously used section to nullptr. PrevCU = nullptr; CurFn = nullptr; return; } // Set DwarfDwarfCompileUnitID in MCContext to default value. 
Asm->OutStreamer->getContext().setDwarfCompileUnitID(0); LexicalScope *FnScope = LScopes.getCurrentFunctionScope(); auto *SP = cast<DISubprogram>(FnScope->getScopeNode()); DwarfCompileUnit &TheCU = *SPMap.lookup(SP); DenseSet<InlinedVariable> ProcessedVars; collectVariableInfo(TheCU, SP, ProcessedVars); // Add the range of this function to the list of ranges for the CU. TheCU.addRange(RangeSpan(Asm->getFunctionBegin(), Asm->getFunctionEnd())); // Under -gmlt, skip building the subprogram if there are no inlined // subroutines inside it. if (TheCU.getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly && LScopes.getAbstractScopesList().empty() && !IsDarwin) { assert(InfoHolder.getScopeVariables().empty()); assert(DbgValues.empty()); // FIXME: This wouldn't be true in LTO with a -g (with inlining) CU followed // by a -gmlt CU. Add a test and remove this assertion. assert(AbstractVariables.empty()); LabelsBeforeInsn.clear(); LabelsAfterInsn.clear(); PrevLabel = nullptr; CurFn = nullptr; return; } #ifndef NDEBUG size_t NumAbstractScopes = LScopes.getAbstractScopesList().size(); #endif // Construct abstract scopes. for (LexicalScope *AScope : LScopes.getAbstractScopesList()) { auto *SP = cast<DISubprogram>(AScope->getScopeNode()); // Collect info for variables that were optimized out. for (const DILocalVariable *DV : SP->getVariables()) { if (!ProcessedVars.insert(InlinedVariable(DV, nullptr)).second) continue; ensureAbstractVariableIsCreated(InlinedVariable(DV, nullptr), DV->getScope()); assert(LScopes.getAbstractScopesList().size() == NumAbstractScopes && "ensureAbstractVariableIsCreated inserted abstract scopes"); } constructAbstractSubprogramScopeDIE(AScope); } TheCU.constructSubprogramScopeDIE(FnScope); if (auto *SkelCU = TheCU.getSkeleton()) if (!LScopes.getAbstractScopesList().empty()) SkelCU->constructSubprogramScopeDIE(FnScope); // Clear debug info // Ownership of DbgVariables is a bit subtle - ScopeVariables owns all the // DbgVariables except those that are also in AbstractVariables (since they // can be used cross-function) InfoHolder.getScopeVariables().clear(); DbgValues.clear(); LabelsBeforeInsn.clear(); LabelsAfterInsn.clear(); PrevLabel = nullptr; CurFn = nullptr; } // Register a source line with debug info. Returns the unique label that was // emitted and which provides correspondence to the source line list. void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S, unsigned Flags) { StringRef Fn; StringRef Dir; unsigned Src = 1; unsigned Discriminator = 0; if (auto *Scope = cast_or_null<DIScope>(S)) { Fn = Scope->getFilename(); Dir = Scope->getDirectory(); if (auto *LBF = dyn_cast<DILexicalBlockFile>(Scope)) Discriminator = LBF->getDiscriminator(); unsigned CUID = Asm->OutStreamer->getContext().getDwarfCompileUnitID(); Src = static_cast<DwarfCompileUnit &>(*InfoHolder.getUnits()[CUID]) .getOrCreateSourceID(Fn, Dir); } Asm->OutStreamer->EmitDwarfLocDirective(Src, Line, Col, Flags, 0, Discriminator, Fn); } //===----------------------------------------------------------------------===// // Emit Methods //===----------------------------------------------------------------------===// // Emit the debug info section. void DwarfDebug::emitDebugInfo() { DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder; Holder.emitUnits(/* UseOffsets */ false); } // Emit the abbreviation section. void DwarfDebug::emitAbbreviations() { DwarfFile &Holder = useSplitDwarf() ? 
SkeletonHolder : InfoHolder; Holder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevSection()); } void DwarfDebug::emitAccel(DwarfAccelTable &Accel, MCSection *Section, StringRef TableName) { Accel.FinalizeTable(Asm, TableName); Asm->OutStreamer->SwitchSection(Section); // Emit the full data. Accel.emit(Asm, Section->getBeginSymbol(), this); } // Emit visible names into a hashed accelerator table section. void DwarfDebug::emitAccelNames() { emitAccel(AccelNames, Asm->getObjFileLowering().getDwarfAccelNamesSection(), "Names"); } // Emit objective C classes and categories into a hashed accelerator table // section. void DwarfDebug::emitAccelObjC() { emitAccel(AccelObjC, Asm->getObjFileLowering().getDwarfAccelObjCSection(), "ObjC"); } // Emit namespace dies into a hashed accelerator table. void DwarfDebug::emitAccelNamespaces() { emitAccel(AccelNamespace, Asm->getObjFileLowering().getDwarfAccelNamespaceSection(), "namespac"); } // Emit type dies into a hashed accelerator table. void DwarfDebug::emitAccelTypes() { emitAccel(AccelTypes, Asm->getObjFileLowering().getDwarfAccelTypesSection(), "types"); } // Public name handling. // The format for the various pubnames: // // dwarf pubnames - offset/name pairs where the offset is the offset into the CU // for the DIE that is named. // // gnu pubnames - offset/index value/name tuples where the offset is the offset // into the CU and the index value is computed according to the type of value // for the DIE that is named. // // For type units the offset is the offset of the skeleton DIE. For split dwarf // it's the offset within the debug_info/debug_types dwo section, however, the // reference in the pubname header doesn't change. /// computeIndexValue - Compute the gdb index value for the DIE and CU. static dwarf::PubIndexEntryDescriptor computeIndexValue(DwarfUnit *CU, const DIE *Die) { dwarf::GDBIndexEntryLinkage Linkage = dwarf::GIEL_STATIC; // We could have a specification DIE that has our most of our knowledge, // look for that now. if (DIEValue SpecVal = Die->findAttribute(dwarf::DW_AT_specification)) { DIE &SpecDIE = SpecVal.getDIEEntry().getEntry(); if (SpecDIE.findAttribute(dwarf::DW_AT_external)) Linkage = dwarf::GIEL_EXTERNAL; } else if (Die->findAttribute(dwarf::DW_AT_external)) Linkage = dwarf::GIEL_EXTERNAL; switch (Die->getTag()) { case dwarf::DW_TAG_class_type: case dwarf::DW_TAG_structure_type: case dwarf::DW_TAG_union_type: case dwarf::DW_TAG_enumeration_type: return dwarf::PubIndexEntryDescriptor( dwarf::GIEK_TYPE, CU->getLanguage() != dwarf::DW_LANG_C_plus_plus ? dwarf::GIEL_STATIC : dwarf::GIEL_EXTERNAL); case dwarf::DW_TAG_typedef: case dwarf::DW_TAG_base_type: case dwarf::DW_TAG_subrange_type: return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_TYPE, dwarf::GIEL_STATIC); case dwarf::DW_TAG_namespace: return dwarf::GIEK_TYPE; case dwarf::DW_TAG_subprogram: return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_FUNCTION, Linkage); case dwarf::DW_TAG_variable: return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_VARIABLE, Linkage); case dwarf::DW_TAG_enumerator: return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_VARIABLE, dwarf::GIEL_STATIC); default: return dwarf::GIEK_NONE; } } /// emitDebugPubNames - Emit visible names into a debug pubnames section. /// void DwarfDebug::emitDebugPubNames(bool GnuStyle) { MCSection *PSec = GnuStyle ? 
Asm->getObjFileLowering().getDwarfGnuPubNamesSection() : Asm->getObjFileLowering().getDwarfPubNamesSection(); emitDebugPubSection(GnuStyle, PSec, "Names", &DwarfCompileUnit::getGlobalNames); } void DwarfDebug::emitDebugPubSection( bool GnuStyle, MCSection *PSec, StringRef Name, const StringMap<const DIE *> &(DwarfCompileUnit::*Accessor)() const) { for (const auto &NU : CUMap) { DwarfCompileUnit *TheU = NU.second; const auto &Globals = (TheU->*Accessor)(); if (Globals.empty()) continue; if (auto *Skeleton = TheU->getSkeleton()) TheU = Skeleton; // Start the dwarf pubnames section. Asm->OutStreamer->SwitchSection(PSec); // Emit the header. Asm->OutStreamer->AddComment("Length of Public " + Name + " Info"); MCSymbol *BeginLabel = Asm->createTempSymbol("pub" + Name + "_begin"); MCSymbol *EndLabel = Asm->createTempSymbol("pub" + Name + "_end"); Asm->EmitLabelDifference(EndLabel, BeginLabel, 4); Asm->OutStreamer->EmitLabel(BeginLabel); Asm->OutStreamer->AddComment("DWARF Version"); Asm->EmitInt16(dwarf::DW_PUBNAMES_VERSION); Asm->OutStreamer->AddComment("Offset of Compilation Unit Info"); Asm->emitDwarfSymbolReference(TheU->getLabelBegin()); Asm->OutStreamer->AddComment("Compilation Unit Length"); Asm->EmitInt32(TheU->getLength()); // Emit the pubnames for this compilation unit. for (const auto &GI : Globals) { const char *Name = GI.getKeyData(); const DIE *Entity = GI.second; Asm->OutStreamer->AddComment("DIE offset"); Asm->EmitInt32(Entity->getOffset()); if (GnuStyle) { dwarf::PubIndexEntryDescriptor Desc = computeIndexValue(TheU, Entity); Asm->OutStreamer->AddComment( Twine("Kind: ") + dwarf::GDBIndexEntryKindString(Desc.Kind) + ", " + dwarf::GDBIndexEntryLinkageString(Desc.Linkage)); Asm->EmitInt8(Desc.toBits()); } Asm->OutStreamer->AddComment("External Name"); Asm->OutStreamer->EmitBytes(StringRef(Name, GI.getKeyLength() + 1)); } Asm->OutStreamer->AddComment("End Mark"); Asm->EmitInt32(0); Asm->OutStreamer->EmitLabel(EndLabel); } } void DwarfDebug::emitDebugPubTypes(bool GnuStyle) { MCSection *PSec = GnuStyle ? Asm->getObjFileLowering().getDwarfGnuPubTypesSection() : Asm->getObjFileLowering().getDwarfPubTypesSection(); emitDebugPubSection(GnuStyle, PSec, "Types", &DwarfCompileUnit::getGlobalTypes); } // Emit visible names into a debug str section. void DwarfDebug::emitDebugStr() { DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder; Holder.emitStrings(Asm->getObjFileLowering().getDwarfStrSection()); } void DwarfDebug::emitDebugLocEntry(ByteStreamer &Streamer, const DebugLocStream::Entry &Entry) { auto &&Comments = DebugLocs.getComments(Entry); auto Comment = Comments.begin(); auto End = Comments.end(); for (uint8_t Byte : DebugLocs.getBytes(Entry)) Streamer.EmitInt8(Byte, Comment != End ? *(Comment++) : ""); } static void emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT, ByteStreamer &Streamer, const DebugLocEntry::Value &Value, unsigned PieceOffsetInBits) { DebugLocDwarfExpression DwarfExpr(*AP.MF->getSubtarget().getRegisterInfo(), AP.getDwarfDebug()->getDwarfVersion(), Streamer); // Regular entry. if (Value.isInt()) { if (BT && (BT->getEncoding() == dwarf::DW_ATE_signed || BT->getEncoding() == dwarf::DW_ATE_signed_char)) DwarfExpr.AddSignedConstant(Value.getInt()); else DwarfExpr.AddUnsignedConstant(Value.getInt()); } else if (Value.isLocation()) { MachineLocation Loc = Value.getLoc(); const DIExpression *Expr = Value.getExpression(); if (!Expr || !Expr->getNumElements()) // Regular entry. AP.EmitDwarfRegOp(Streamer, Loc); else { // Complex address entry. 
if (Loc.getOffset()) { DwarfExpr.AddMachineRegIndirect(Loc.getReg(), Loc.getOffset()); DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end(), PieceOffsetInBits); } else DwarfExpr.AddMachineRegExpression(Expr, Loc.getReg(), PieceOffsetInBits); } } // else ... ignore constant fp. There is not any good way to // to represent them here in dwarf. // FIXME: ^ } void DebugLocEntry::finalize(const AsmPrinter &AP, DebugLocStream::ListBuilder &List, const DIBasicType *BT) { DebugLocStream::EntryBuilder Entry(List, Begin, End); BufferByteStreamer Streamer = Entry.getStreamer(); const DebugLocEntry::Value &Value = Values[0]; if (Value.isBitPiece()) { // Emit all pieces that belong to the same variable and range. assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value P) { return P.isBitPiece(); }) && "all values are expected to be pieces"); assert(std::is_sorted(Values.begin(), Values.end()) && "pieces are expected to be sorted"); unsigned Offset = 0; for (auto Piece : Values) { const DIExpression *Expr = Piece.getExpression(); unsigned PieceOffset = Expr->getBitPieceOffset(); unsigned PieceSize = Expr->getBitPieceSize(); assert(Offset <= PieceOffset && "overlapping or duplicate pieces"); if (Offset < PieceOffset) { // The DWARF spec seriously mandates pieces with no locations for gaps. DebugLocDwarfExpression Expr(*AP.MF->getSubtarget().getRegisterInfo(), AP.getDwarfDebug()->getDwarfVersion(), Streamer); Expr.AddOpPiece(PieceOffset-Offset, 0); Offset += PieceOffset-Offset; } Offset += PieceSize; emitDebugLocValue(AP, BT, Streamer, Piece, PieceOffset); } } else { assert(Values.size() == 1 && "only pieces may have >1 value"); emitDebugLocValue(AP, BT, Streamer, Value, 0); } } void DwarfDebug::emitDebugLocEntryLocation(const DebugLocStream::Entry &Entry) { // Emit the size. Asm->OutStreamer->AddComment("Loc expr size"); Asm->EmitInt16(DebugLocs.getBytes(Entry).size()); // Emit the entry. APByteStreamer Streamer(*Asm); emitDebugLocEntry(Streamer, Entry); } // Emit locations into the debug loc section. void DwarfDebug::emitDebugLoc() { // Start the dwarf loc section. Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfLocSection()); unsigned char Size = Asm->getDataLayout().getPointerSize(); for (const auto &List : DebugLocs.getLists()) { Asm->OutStreamer->EmitLabel(List.Label); const DwarfCompileUnit *CU = List.CU; for (const auto &Entry : DebugLocs.getEntries(List)) { // Set up the range. This range is relative to the entry point of the // compile unit. This is a hard coded 0 for low_pc when we're emitting // ranges, or the DW_AT_low_pc on the compile unit otherwise. if (auto *Base = CU->getBaseAddress()) { Asm->EmitLabelDifference(Entry.BeginSym, Base, Size); Asm->EmitLabelDifference(Entry.EndSym, Base, Size); } else { Asm->OutStreamer->EmitSymbolValue(Entry.BeginSym, Size); Asm->OutStreamer->EmitSymbolValue(Entry.EndSym, Size); } emitDebugLocEntryLocation(Entry); } Asm->OutStreamer->EmitIntValue(0, Size); Asm->OutStreamer->EmitIntValue(0, Size); } } void DwarfDebug::emitDebugLocDWO() { Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfLocDWOSection()); for (const auto &List : DebugLocs.getLists()) { Asm->OutStreamer->EmitLabel(List.Label); for (const auto &Entry : DebugLocs.getEntries(List)) { // Just always use start_length for now - at least that's one address // rather than two. We could get fancier and try to, say, reuse an // address we know we've emitted elsewhere (the start of the function? 
// The start of the CU or CU subrange that encloses this range?) Asm->EmitInt8(dwarf::DW_LLE_start_length_entry); unsigned idx = AddrPool.getIndex(Entry.BeginSym); Asm->EmitULEB128(idx); Asm->EmitLabelDifference(Entry.EndSym, Entry.BeginSym, 4); emitDebugLocEntryLocation(Entry); } Asm->EmitInt8(dwarf::DW_LLE_end_of_list_entry); } } struct ArangeSpan { const MCSymbol *Start, *End; }; // Emit a debug aranges section, containing a CU lookup for any // address we can tie back to a CU. void DwarfDebug::emitDebugARanges() { // Provides a unique id per text section. MapVector<MCSection *, SmallVector<SymbolCU, 8>> SectionMap; // Filter labels by section. for (const SymbolCU &SCU : ArangeLabels) { if (SCU.Sym->isInSection()) { // Make a note of this symbol and it's section. MCSection *Section = &SCU.Sym->getSection(); if (!Section->getKind().isMetadata()) SectionMap[Section].push_back(SCU); } else { // Some symbols (e.g. common/bss on mach-o) can have no section but still // appear in the output. This sucks as we rely on sections to build // arange spans. We can do it without, but it's icky. SectionMap[nullptr].push_back(SCU); } } // Add terminating symbols for each section. for (const auto &I : SectionMap) { MCSection *Section = I.first; MCSymbol *Sym = nullptr; if (Section) Sym = Asm->OutStreamer->endSection(Section); // Insert a final terminator. SectionMap[Section].push_back(SymbolCU(nullptr, Sym)); } DenseMap<DwarfCompileUnit *, std::vector<ArangeSpan>> Spans; for (auto &I : SectionMap) { const MCSection *Section = I.first; SmallVector<SymbolCU, 8> &List = I.second; if (List.size() < 2) continue; // If we have no section (e.g. common), just write out // individual spans for each symbol. if (!Section) { for (const SymbolCU &Cur : List) { ArangeSpan Span; Span.Start = Cur.Sym; Span.End = nullptr; if (Cur.CU) Spans[Cur.CU].push_back(Span); } continue; } // Sort the symbols by offset within the section. std::sort(List.begin(), List.end(), [&](const SymbolCU &A, const SymbolCU &B) { unsigned IA = A.Sym ? Asm->OutStreamer->GetSymbolOrder(A.Sym) : 0; unsigned IB = B.Sym ? Asm->OutStreamer->GetSymbolOrder(B.Sym) : 0; // Symbols with no order assigned should be placed at the end. // (e.g. section end labels) if (IA == 0) return false; if (IB == 0) return true; return IA < IB; }); // Build spans between each label. const MCSymbol *StartSym = List[0].Sym; for (size_t n = 1, e = List.size(); n < e; n++) { const SymbolCU &Prev = List[n - 1]; const SymbolCU &Cur = List[n]; // Try and build the longest span we can within the same CU. if (Cur.CU != Prev.CU) { ArangeSpan Span; Span.Start = StartSym; Span.End = Cur.Sym; Spans[Prev.CU].push_back(Span); StartSym = Cur.Sym; } } } // Start the dwarf aranges section. Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfARangesSection()); unsigned PtrSize = Asm->getDataLayout().getPointerSize(); // Build a list of CUs used. std::vector<DwarfCompileUnit *> CUs; for (const auto &it : Spans) { DwarfCompileUnit *CU = it.first; CUs.push_back(CU); } // Sort the CU list (again, to ensure consistent output order). std::sort(CUs.begin(), CUs.end(), [](const DwarfUnit *A, const DwarfUnit *B) { return A->getUniqueID() < B->getUniqueID(); }); // Emit an arange table for each CU we used. for (DwarfCompileUnit *CU : CUs) { std::vector<ArangeSpan> &List = Spans[CU]; // Describe the skeleton CU's offset and length, not the dwo file's. if (auto *Skel = CU->getSkeleton()) CU = Skel; // Emit size of content not including length itself. 
unsigned ContentSize = sizeof(int16_t) + // DWARF ARange version number sizeof(int32_t) + // Offset of CU in the .debug_info section sizeof(int8_t) + // Pointer Size (in bytes) sizeof(int8_t); // Segment Size (in bytes) unsigned TupleSize = PtrSize * 2; // 7.20 in the Dwarf specs requires the table to be aligned to a tuple. unsigned Padding = OffsetToAlignment(sizeof(int32_t) + ContentSize, TupleSize); ContentSize += Padding; ContentSize += (List.size() + 1) * TupleSize; // For each compile unit, write the list of spans it covers. Asm->OutStreamer->AddComment("Length of ARange Set"); Asm->EmitInt32(ContentSize); Asm->OutStreamer->AddComment("DWARF Arange version number"); Asm->EmitInt16(dwarf::DW_ARANGES_VERSION); Asm->OutStreamer->AddComment("Offset Into Debug Info Section"); Asm->emitDwarfSymbolReference(CU->getLabelBegin()); Asm->OutStreamer->AddComment("Address Size (in bytes)"); Asm->EmitInt8(PtrSize); Asm->OutStreamer->AddComment("Segment Size (in bytes)"); Asm->EmitInt8(0); Asm->OutStreamer->EmitFill(Padding, 0xff); for (const ArangeSpan &Span : List) { Asm->EmitLabelReference(Span.Start, PtrSize); // Calculate the size as being from the span start to it's end. if (Span.End) { Asm->EmitLabelDifference(Span.End, Span.Start, PtrSize); } else { // For symbols without an end marker (e.g. common), we // write a single arange entry containing just that one symbol. uint64_t Size = SymSize[Span.Start]; if (Size == 0) Size = 1; Asm->OutStreamer->EmitIntValue(Size, PtrSize); } } Asm->OutStreamer->AddComment("ARange terminator"); Asm->OutStreamer->EmitIntValue(0, PtrSize); Asm->OutStreamer->EmitIntValue(0, PtrSize); } } // Emit visible names into a debug ranges section. void DwarfDebug::emitDebugRanges() { // Start the dwarf ranges section. Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfRangesSection()); // Size for our labels. unsigned char Size = Asm->getDataLayout().getPointerSize(); // Grab the specific ranges for the compile units in the module. for (const auto &I : CUMap) { DwarfCompileUnit *TheCU = I.second; if (auto *Skel = TheCU->getSkeleton()) TheCU = Skel; // Iterate over the misc ranges for the compile units in the module. for (const RangeSpanList &List : TheCU->getRangeLists()) { // Emit our symbol so we can find the beginning of the range. Asm->OutStreamer->EmitLabel(List.getSym()); for (const RangeSpan &Range : List.getRanges()) { const MCSymbol *Begin = Range.getStart(); const MCSymbol *End = Range.getEnd(); assert(Begin && "Range without a begin symbol?"); assert(End && "Range without an end symbol?"); if (auto *Base = TheCU->getBaseAddress()) { Asm->EmitLabelDifference(Begin, Base, Size); Asm->EmitLabelDifference(End, Base, Size); } else { Asm->OutStreamer->EmitSymbolValue(Begin, Size); Asm->OutStreamer->EmitSymbolValue(End, Size); } } // And terminate the list with two 0 values. Asm->OutStreamer->EmitIntValue(0, Size); Asm->OutStreamer->EmitIntValue(0, Size); } } } // DWARF5 Experimental Separate Dwarf emitters. void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die, std::unique_ptr<DwarfUnit> NewU) { NewU->addString(Die, dwarf::DW_AT_GNU_dwo_name, U.getCUNode()->getSplitDebugFilename()); if (!CompilationDir.empty()) NewU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir); addGnuPubAttributes(*NewU, Die); SkeletonHolder.addUnit(std::move(NewU)); } // This DIE has the following attributes: DW_AT_comp_dir, DW_AT_stmt_list, // DW_AT_low_pc, DW_AT_high_pc, DW_AT_ranges, DW_AT_dwo_name, DW_AT_dwo_id, // DW_AT_addr_base, DW_AT_ranges_base. 
DwarfCompileUnit &DwarfDebug::constructSkeletonCU(const DwarfCompileUnit &CU) { auto OwnedUnit = make_unique<DwarfCompileUnit>( CU.getUniqueID(), CU.getCUNode(), Asm, this, &SkeletonHolder); DwarfCompileUnit &NewCU = *OwnedUnit; NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoSection()); NewCU.initStmtList(); initSkeletonUnit(CU, NewCU.getUnitDie(), std::move(OwnedUnit)); return NewCU; } // Emit the .debug_info.dwo section for separated dwarf. This contains the // compile units that would normally be in debug_info. void DwarfDebug::emitDebugInfoDWO() { assert(useSplitDwarf() && "No split dwarf debug info?"); // Don't emit relocations into the dwo file. InfoHolder.emitUnits(/* UseOffsets */ true); } // Emit the .debug_abbrev.dwo section for separated dwarf. This contains the // abbreviations for the .debug_info.dwo section. void DwarfDebug::emitDebugAbbrevDWO() { assert(useSplitDwarf() && "No split dwarf?"); InfoHolder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevDWOSection()); } void DwarfDebug::emitDebugLineDWO() { assert(useSplitDwarf() && "No split dwarf?"); Asm->OutStreamer->SwitchSection( Asm->getObjFileLowering().getDwarfLineDWOSection()); SplitTypeUnitFileTable.Emit(*Asm->OutStreamer); } // Emit the .debug_str.dwo section for separated dwarf. This contains the // string section and is identical in format to traditional .debug_str // sections. void DwarfDebug::emitDebugStrDWO() { assert(useSplitDwarf() && "No split dwarf?"); MCSection *OffSec = Asm->getObjFileLowering().getDwarfStrOffDWOSection(); InfoHolder.emitStrings(Asm->getObjFileLowering().getDwarfStrDWOSection(), OffSec); } MCDwarfDwoLineTable *DwarfDebug::getDwoLineTable(const DwarfCompileUnit &CU) { if (!useSplitDwarf()) return nullptr; if (SingleCU) SplitTypeUnitFileTable.setCompilationDir(CU.getCUNode()->getDirectory()); return &SplitTypeUnitFileTable; } static uint64_t makeTypeSignature(StringRef Identifier) { MD5 Hash; Hash.update(Identifier); // ... take the least significant 8 bytes and return those. Our MD5 // implementation always returns its results in little endian, swap bytes // appropriately. MD5::MD5Result Result; Hash.final(Result); return support::endian::read64le(Result + 8); } void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU, StringRef Identifier, DIE &RefDie, const DICompositeType *CTy) { // Fast path if we're building some type units and one has already used the // address pool we know we're going to throw away all this work anyway, so // don't bother building dependent types. 
if (!TypeUnitsUnderConstruction.empty() && AddrPool.hasBeenUsed()) return; const DwarfTypeUnit *&TU = DwarfTypeUnits[CTy]; if (TU) { CU.addDIETypeSignature(RefDie, *TU); return; } bool TopLevelType = TypeUnitsUnderConstruction.empty(); AddrPool.resetUsedFlag(); auto OwnedUnit = make_unique<DwarfTypeUnit>( InfoHolder.getUnits().size() + TypeUnitsUnderConstruction.size(), CU, Asm, this, &InfoHolder, getDwoLineTable(CU)); DwarfTypeUnit &NewTU = *OwnedUnit; DIE &UnitDie = NewTU.getUnitDie(); TU = &NewTU; TypeUnitsUnderConstruction.push_back( std::make_pair(std::move(OwnedUnit), CTy)); NewTU.addUInt(UnitDie, dwarf::DW_AT_language, dwarf::DW_FORM_data2, CU.getLanguage()); uint64_t Signature = makeTypeSignature(Identifier); NewTU.setTypeSignature(Signature); if (useSplitDwarf()) NewTU.initSection(Asm->getObjFileLowering().getDwarfTypesDWOSection()); else { CU.applyStmtList(UnitDie); NewTU.initSection( Asm->getObjFileLowering().getDwarfTypesSection(Signature)); } NewTU.setType(NewTU.createTypeDIE(CTy)); if (TopLevelType) { auto TypeUnitsToAdd = std::move(TypeUnitsUnderConstruction); TypeUnitsUnderConstruction.clear(); // Types referencing entries in the address table cannot be placed in type // units. if (AddrPool.hasBeenUsed()) { // Remove all the types built while building this type. // This is pessimistic as some of these types might not be dependent on // the type that used an address. for (const auto &TU : TypeUnitsToAdd) DwarfTypeUnits.erase(TU.second); // Construct this type in the CU directly. // This is inefficient because all the dependent types will be rebuilt // from scratch, including building them in type units, discovering that // they depend on addresses, throwing them out and rebuilding them. CU.constructTypeDIE(RefDie, cast<DICompositeType>(CTy)); return; } // If the type wasn't dependent on fission addresses, finish adding the type // and all its dependent types. for (auto &TU : TypeUnitsToAdd) InfoHolder.addUnit(std::move(TU.first)); } CU.addDIETypeSignature(RefDie, NewTU); } // Accelerator table mutators - add each name along with its companion // DIE to the proper table while ensuring that the name that we're going // to reference is in the string table. We do this since the names we // add may not only be identical to the names in the DIE. void DwarfDebug::addAccelName(StringRef Name, const DIE &Die) { if (!useDwarfAccelTables()) return; AccelNames.AddName(InfoHolder.getStringPool().getEntry(*Asm, Name), &Die); } void DwarfDebug::addAccelObjC(StringRef Name, const DIE &Die) { if (!useDwarfAccelTables()) return; AccelObjC.AddName(InfoHolder.getStringPool().getEntry(*Asm, Name), &Die); } void DwarfDebug::addAccelNamespace(StringRef Name, const DIE &Die) { if (!useDwarfAccelTables()) return; AccelNamespace.AddName(InfoHolder.getStringPool().getEntry(*Asm, Name), &Die); } void DwarfDebug::addAccelType(StringRef Name, const DIE &Die, char Flags) { if (!useDwarfAccelTables()) return; AccelTypes.AddName(InfoHolder.getStringPool().getEntry(*Asm, Name), &Die); }
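The makeTypeSignature() routine above hashes the unique type identifier with MD5 and keeps only the low 8 bytes of the digest, read little-endian, as the DWARF type-unit signature. A minimal sketch of that extraction step, written against a plain 16-byte digest instead of llvm::MD5 (the helper name is illustrative, not part of DwarfDebug):

#include <cstdint>

// Read bytes 8..15 of a 16-byte digest as a little-endian 64-bit value,
// mirroring support::endian::read64le(Result + 8) in makeTypeSignature().
uint64_t typeSignatureFromDigest(const uint8_t Digest[16]) {
  uint64_t Sig = 0;
  for (int i = 7; i >= 0; --i)
    Sig = (Sig << 8) | Digest[8 + i];
  return Sig;
}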
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DebugLocStream.cpp
//===- DebugLocStream.cpp - DWARF debug_loc stream --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "DebugLocStream.h" #include "DwarfDebug.h" #include "llvm/CodeGen/AsmPrinter.h" using namespace llvm; bool DebugLocStream::finalizeList(AsmPrinter &Asm) { if (Lists.back().EntryOffset == Entries.size()) { // Empty list. Delete it. Lists.pop_back(); return false; } // Real list. Generate a label for it. Lists.back().Label = Asm.createTempSymbol("debug_loc"); return true; } void DebugLocStream::finalizeEntry() { if (Entries.back().ByteOffset != DWARFBytes.size()) return; // The last entry was empty. Delete it. Comments.erase(Comments.begin() + Entries.back().CommentOffset, Comments.end()); Entries.pop_back(); assert(Lists.back().EntryOffset <= Entries.size() && "Popped off more entries than are in the list"); } DebugLocStream::ListBuilder::~ListBuilder() { if (!Locs.finalizeList(Asm)) return; V.initializeDbgValue(&MI); V.setDebugLocListIndex(ListIndex); }
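finalizeList() and finalizeEntry() above share one idea: a list or entry that has contributed no bytes since it was opened is popped again before anything can reference it. A minimal sketch of that pattern under illustrative names (this is not the DebugLocStream API):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry { size_t ByteOffset; };

struct ByteStream {
  std::vector<uint8_t> Bytes;
  std::vector<Entry> Entries;

  void startEntry() { Entries.push_back({Bytes.size()}); }
  void finalizeEntry() {
    // The last entry emitted nothing since startEntry(); discard it.
    if (!Entries.empty() && Entries.back().ByteOffset == Bytes.size())
      Entries.pop_back();
  }
};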
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
//===-- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf debug info into asm files. // //===----------------------------------------------------------------------===// #include "DwarfExpression.h" #include "DwarfDebug.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/Support/Dwarf.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; void DwarfExpression::AddReg(int DwarfReg, const char *Comment) { assert(DwarfReg >= 0 && "invalid negative dwarf register number"); if (DwarfReg < 32) { EmitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment); } else { EmitOp(dwarf::DW_OP_regx, Comment); EmitUnsigned(DwarfReg); } } void DwarfExpression::AddRegIndirect(int DwarfReg, int Offset, bool Deref) { assert(DwarfReg >= 0 && "invalid negative dwarf register number"); if (DwarfReg < 32) { EmitOp(dwarf::DW_OP_breg0 + DwarfReg); } else { EmitOp(dwarf::DW_OP_bregx); EmitUnsigned(DwarfReg); } EmitSigned(Offset); if (Deref) EmitOp(dwarf::DW_OP_deref); } void DwarfExpression::AddOpPiece(unsigned SizeInBits, unsigned OffsetInBits) { assert(SizeInBits > 0 && "piece has size zero"); const unsigned SizeOfByte = 8; if (OffsetInBits > 0 || SizeInBits % SizeOfByte) { EmitOp(dwarf::DW_OP_bit_piece); EmitUnsigned(SizeInBits); EmitUnsigned(OffsetInBits); } else { EmitOp(dwarf::DW_OP_piece); unsigned ByteSize = SizeInBits / SizeOfByte; EmitUnsigned(ByteSize); } } void DwarfExpression::AddShr(unsigned ShiftBy) { EmitOp(dwarf::DW_OP_constu); EmitUnsigned(ShiftBy); EmitOp(dwarf::DW_OP_shr); } bool DwarfExpression::AddMachineRegIndirect(unsigned MachineReg, int Offset) { if (isFrameRegister(MachineReg)) { // If variable offset is based in frame register then use fbreg. EmitOp(dwarf::DW_OP_fbreg); EmitSigned(Offset); return true; } int DwarfReg = TRI.getDwarfRegNum(MachineReg, false); if (DwarfReg < 0) return false; AddRegIndirect(DwarfReg, Offset); return true; } bool DwarfExpression::AddMachineRegPiece(unsigned MachineReg, unsigned PieceSizeInBits, unsigned PieceOffsetInBits) { if (!TRI.isPhysicalRegister(MachineReg)) return false; int Reg = TRI.getDwarfRegNum(MachineReg, false); // If this is a valid register number, emit it. if (Reg >= 0) { AddReg(Reg); if (PieceSizeInBits) AddOpPiece(PieceSizeInBits, PieceOffsetInBits); return true; } // Walk up the super-register chain until we find a valid number. // For example, EAX on x86_64 is a 32-bit piece of RAX with offset 0. for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) { Reg = TRI.getDwarfRegNum(*SR, false); if (Reg >= 0) { unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg); unsigned Size = TRI.getSubRegIdxSize(Idx); unsigned RegOffset = TRI.getSubRegIdxOffset(Idx); AddReg(Reg, "super-register"); if (PieceOffsetInBits == RegOffset) { AddOpPiece(Size, RegOffset); } else { // If this is part of a variable in a sub-register at a // non-zero offset, we need to manually shift the value into // place, since the DW_OP_piece describes the part of the // variable, not the position of the subregister. 
if (RegOffset) AddShr(RegOffset); AddOpPiece(Size, PieceOffsetInBits); } return true; } } // Otherwise, attempt to find a covering set of sub-register numbers. // For example, Q0 on ARM is a composition of D0+D1. // // Keep track of the current position so we can emit the more // efficient DW_OP_piece. unsigned CurPos = PieceOffsetInBits; // The size of the register in bits, assuming 8 bits per byte. unsigned RegSize = TRI.getMinimalPhysRegClass(MachineReg)->getSize() * 8; // Keep track of the bits in the register we already emitted, so we // can avoid emitting redundant aliasing subregs. SmallBitVector Coverage(RegSize, false); for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) { unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR); unsigned Size = TRI.getSubRegIdxSize(Idx); unsigned Offset = TRI.getSubRegIdxOffset(Idx); Reg = TRI.getDwarfRegNum(*SR, false); // Intersection between the bits we already emitted and the bits // covered by this subregister. SmallBitVector Intersection(RegSize, false); Intersection.set(Offset, Offset + Size); Intersection ^= Coverage; // If this sub-register has a DWARF number and we haven't covered // its range, emit a DWARF piece for it. if (Reg >= 0 && Intersection.any()) { AddReg(Reg, "sub-register"); AddOpPiece(Size, Offset == CurPos ? 0 : Offset); CurPos = Offset + Size; // Mark it as emitted. Coverage.set(Offset, Offset + Size); } } return CurPos > PieceOffsetInBits; } void DwarfExpression::AddSignedConstant(int Value) { EmitOp(dwarf::DW_OP_consts); EmitSigned(Value); // The proper way to describe a constant value is // DW_OP_constu <const>, DW_OP_stack_value. // Unfortunately, DW_OP_stack_value was not available until DWARF-4, // so we will continue to generate DW_OP_constu <const> for DWARF-2 // and DWARF-3. Technically, this is incorrect since DW_OP_const <const> // actually describes a value at a constant addess, not a constant value. // However, in the past there was no better way to describe a constant // value, so the producers and consumers started to rely on heuristics // to disambiguate the value vs. location status of the expression. // See PR21176 for more details. if (DwarfVersion >= 4) EmitOp(dwarf::DW_OP_stack_value); } void DwarfExpression::AddUnsignedConstant(unsigned Value) { EmitOp(dwarf::DW_OP_constu); EmitUnsigned(Value); // cf. comment in DwarfExpression::AddSignedConstant(). if (DwarfVersion >= 4) EmitOp(dwarf::DW_OP_stack_value); } static unsigned getOffsetOrZero(unsigned OffsetInBits, unsigned PieceOffsetInBits) { if (OffsetInBits == PieceOffsetInBits) return 0; assert(OffsetInBits >= PieceOffsetInBits && "overlapping pieces"); return OffsetInBits; } bool DwarfExpression::AddMachineRegExpression(const DIExpression *Expr, unsigned MachineReg, unsigned PieceOffsetInBits) { auto I = Expr->expr_op_begin(); auto E = Expr->expr_op_end(); if (I == E) return AddMachineRegPiece(MachineReg); // Pattern-match combinations for which more efficient representations exist // first. bool ValidReg = false; switch (I->getOp()) { case dwarf::DW_OP_bit_piece: { unsigned OffsetInBits = I->getArg(0); unsigned SizeInBits = I->getArg(1); // Piece always comes at the end of the expression. return AddMachineRegPiece(MachineReg, SizeInBits, getOffsetOrZero(OffsetInBits, PieceOffsetInBits)); } case dwarf::DW_OP_plus: { // [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset]. 
auto N = I.getNext(); if (N != E && N->getOp() == dwarf::DW_OP_deref) { unsigned Offset = I->getArg(0); ValidReg = AddMachineRegIndirect(MachineReg, Offset); std::advance(I, 2); break; } else ValidReg = AddMachineRegPiece(MachineReg); } case dwarf::DW_OP_deref: { // [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg]. ValidReg = AddMachineRegIndirect(MachineReg); ++I; break; } default: llvm_unreachable("unsupported operand"); } if (!ValidReg) return false; // Emit remaining elements of the expression. AddExpression(I, E, PieceOffsetInBits); return true; } void DwarfExpression::AddExpression(DIExpression::expr_op_iterator I, DIExpression::expr_op_iterator E, unsigned PieceOffsetInBits) { for (; I != E; ++I) { switch (I->getOp()) { case dwarf::DW_OP_bit_piece: { unsigned OffsetInBits = I->getArg(0); unsigned SizeInBits = I->getArg(1); AddOpPiece(SizeInBits, getOffsetOrZero(OffsetInBits, PieceOffsetInBits)); break; } case dwarf::DW_OP_plus: EmitOp(dwarf::DW_OP_plus_uconst); EmitUnsigned(I->getArg(0)); break; case dwarf::DW_OP_deref: EmitOp(dwarf::DW_OP_deref); break; default: llvm_unreachable("unhandled opcode found in expression"); } } }
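AddOpPiece() above chooses between the two DWARF piece operators: a byte-sized piece at a zero bit offset can use DW_OP_piece with a byte count, while anything with a bit offset or a non-byte size needs DW_OP_bit_piece. A minimal sketch of that selection rule, printing opcodes instead of emitting them (the constants are the standard DWARF opcode values):

#include <cstdio>

void describePiece(unsigned SizeInBits, unsigned OffsetInBits) {
  const unsigned DW_OP_piece = 0x93, DW_OP_bit_piece = 0x9d;
  if (OffsetInBits > 0 || SizeInBits % 8)
    std::printf("DW_OP_bit_piece (0x%x) size=%u offset=%u\n", DW_OP_bit_piece,
                SizeInBits, OffsetInBits);
  else
    std::printf("DW_OP_piece (0x%x) bytes=%u\n", DW_OP_piece, SizeInBits / 8);
}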
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/WinException.h
//===-- WinException.h - Windows Exception Handling ----------*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing windows exception info into asm files. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_WIN64EXCEPTION_H #define LLVM_LIB_CODEGEN_ASMPRINTER_WIN64EXCEPTION_H #include "EHStreamer.h" namespace llvm { class Function; class GlobalValue; class MachineFunction; class MCExpr; struct WinEHFuncInfo; class LLVM_LIBRARY_VISIBILITY WinException : public EHStreamer { /// Per-function flag to indicate if personality info should be emitted. bool shouldEmitPersonality = false; /// Per-function flag to indicate if the LSDA should be emitted. bool shouldEmitLSDA = false; /// Per-function flag to indicate if frame moves info should be emitted. bool shouldEmitMoves = false; /// True if this is a 64-bit target and we should use image relative offsets. bool useImageRel32 = false; void emitCSpecificHandlerTable(); /// Emit the EH table data for 32-bit and 64-bit functions using /// the __CxxFrameHandler3 personality. void emitCXXFrameHandler3Table(const MachineFunction *MF); /// Emit the EH table data for _except_handler3 and _except_handler4 /// personality functions. These are only used on 32-bit and do not use CFI /// tables. void emitExceptHandlerTable(const MachineFunction *MF); void extendIP2StateTable(const MachineFunction *MF, const Function *ParentF, WinEHFuncInfo &FuncInfo); /// Emits the label used with llvm.x86.seh.recoverfp, which is used by /// outlined funclets. void emitEHRegistrationOffsetLabel(const WinEHFuncInfo &FuncInfo, StringRef FLinkageName); const MCExpr *create32bitRef(const MCSymbol *Value); const MCExpr *create32bitRef(const GlobalValue *GV); public: //===--------------------------------------------------------------------===// // Main entry points. // WinException(AsmPrinter *A); ~WinException() override; /// Emit all exception information that should come after the content. void endModule() override; /// Gather pre-function exception information. Assumes being emitted /// immediately after the function entry point. void beginFunction(const MachineFunction *MF) override; /// Gather and emit post-function exception information. void endFunction(const MachineFunction *) override; }; } #endif
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfStringPool.h
//===-- llvm/CodeGen/DwarfStringPool.h - Dwarf Debug Framework -*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFSTRINGPOOL_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFSTRINGPOOL_H #include "llvm/ADT/StringMap.h" #include "llvm/CodeGen/DwarfStringPoolEntry.h" #include "llvm/Support/Allocator.h" #include <utility> namespace llvm { class AsmPrinter; class MCSymbol; class MCSection; class StringRef; // Collection of strings for this unit and assorted symbols. // A String->Symbol mapping of strings used by indirect // references. class DwarfStringPool { typedef DwarfStringPoolEntry EntryTy; StringMap<EntryTy, BumpPtrAllocator &> Pool; StringRef Prefix; unsigned NumBytes = 0; bool ShouldCreateSymbols; public: typedef DwarfStringPoolEntryRef EntryRef; DwarfStringPool(BumpPtrAllocator &A, AsmPrinter &Asm, StringRef Prefix); void emit(AsmPrinter &Asm, MCSection *StrSection, MCSection *OffsetSection = nullptr); bool empty() const { return Pool.empty(); } /// Get a reference to an entry in the string pool. EntryRef getEntry(AsmPrinter &Asm, StringRef Str); }; } #endif
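A .debug_str-style pool like the one declared above assigns each distinct string a byte offset, and the pool grows by the string length plus its NUL terminator. A minimal sketch of that offset bookkeeping under illustrative names (this is not the DwarfStringPool interface):

#include <cstdint>
#include <string>
#include <unordered_map>

class SimpleStringPool {
  std::unordered_map<std::string, uint32_t> Offsets;
  uint32_t NumBytes = 0;

public:
  uint32_t getOffset(const std::string &S) {
    auto It = Offsets.find(S);
    if (It != Offsets.end())
      return It->second; // already pooled; reuse its offset
    uint32_t Off = NumBytes;
    Offsets.emplace(S, Off);
    NumBytes += S.size() + 1; // string bytes plus the trailing NUL
    return Off;
  }
};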
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/ARMException.cpp
//===-- CodeGen/AsmPrinter/ARMException.cpp - ARM EHABI Exception Impl ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing DWARF exception info into asm files. // //===----------------------------------------------------------------------===// #include "DwarfException.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Twine.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetRegisterInfo.h" using namespace llvm; ARMException::ARMException(AsmPrinter *A) : DwarfCFIExceptionBase(A) {} ARMException::~ARMException() {} ARMTargetStreamer &ARMException::getTargetStreamer() { MCTargetStreamer &TS = *Asm->OutStreamer->getTargetStreamer(); return static_cast<ARMTargetStreamer &>(TS); } /// endModule - Emit all exception information that should come after the /// content. void ARMException::endModule() { if (shouldEmitCFI) Asm->OutStreamer->EmitCFISections(false, true); } void ARMException::beginFunction(const MachineFunction *MF) { if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::ARM) getTargetStreamer().emitFnStart(); // See if we need call frame info. AsmPrinter::CFIMoveType MoveType = Asm->needsCFIMoves(); assert(MoveType != AsmPrinter::CFI_M_EH && "non-EH CFI not yet supported in prologue with EHABI lowering"); if (MoveType == AsmPrinter::CFI_M_Debug) { shouldEmitCFI = true; Asm->OutStreamer->EmitCFIStartProc(false); } } /// endFunction - Gather and emit post-function exception information. /// void ARMException::endFunction(const MachineFunction *MF) { ARMTargetStreamer &ATS = getTargetStreamer(); const Function *F = MF->getFunction(); const Function *Per = nullptr; if (F->hasPersonalityFn()) Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts()); assert(!MMI->getPersonality() || Per == MMI->getPersonality()); bool forceEmitPersonality = F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) && F->needsUnwindTableEntry(); bool shouldEmitPersonality = forceEmitPersonality || !MMI->getLandingPads().empty(); if (!Asm->MF->getFunction()->needsUnwindTableEntry() && !shouldEmitPersonality) ATS.emitCantUnwind(); else if (shouldEmitPersonality) { // Emit references to personality. if (Per) { MCSymbol *PerSym = Asm->getSymbol(Per); Asm->OutStreamer->EmitSymbolAttribute(PerSym, MCSA_Global); ATS.emitPersonality(PerSym); } // Emit .handlerdata directive. 
ATS.emitHandlerData(); // Emit actual exception table emitExceptionTable(); } if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::ARM) ATS.emitFnEnd(); } void ARMException::emitTypeInfos(unsigned TTypeEncoding) { const std::vector<const GlobalValue *> &TypeInfos = MMI->getTypeInfos(); const std::vector<unsigned> &FilterIds = MMI->getFilterIds(); bool VerboseAsm = Asm->OutStreamer->isVerboseAsm(); int Entry = 0; // Emit the Catch TypeInfos. if (VerboseAsm && !TypeInfos.empty()) { Asm->OutStreamer->AddComment(">> Catch TypeInfos <<"); Asm->OutStreamer->AddBlankLine(); Entry = TypeInfos.size(); } for (const GlobalValue *GV : reverse(TypeInfos)) { if (VerboseAsm) Asm->OutStreamer->AddComment("TypeInfo " + Twine(Entry--)); Asm->EmitTTypeReference(GV, TTypeEncoding); } // Emit the Exception Specifications. if (VerboseAsm && !FilterIds.empty()) { Asm->OutStreamer->AddComment(">> Filter TypeInfos <<"); Asm->OutStreamer->AddBlankLine(); Entry = 0; } for (std::vector<unsigned>::const_iterator I = FilterIds.begin(), E = FilterIds.end(); I < E; ++I) { unsigned TypeID = *I; if (VerboseAsm) { --Entry; if (TypeID != 0) Asm->OutStreamer->AddComment("FilterInfo " + Twine(Entry)); } Asm->EmitTTypeReference((TypeID == 0 ? nullptr : TypeInfos[TypeID - 1]), TTypeEncoding); } }
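endFunction() above picks between three ARM EHABI outcomes for a function: .cantunwind when the function needs neither an unwind table nor a personality, .personality plus .handlerdata (and the exception table) when a personality must be advertised, and plain unwind framing otherwise. A minimal sketch of that decision with an illustrative enum in place of the target-streamer calls:

enum class ArmEHDirective { CantUnwind, PersonalityAndHandlerData, UnwindOnly };

ArmEHDirective pickDirective(bool NeedsUnwindTable, bool EmitPersonality) {
  if (!NeedsUnwindTable && !EmitPersonality)
    return ArmEHDirective::CantUnwind;                // ATS.emitCantUnwind()
  if (EmitPersonality)
    return ArmEHDirective::PersonalityAndHandlerData; // .personality + .handlerdata
  return ArmEHDirective::UnwindOnly;                  // only .fnstart/.fnend framing
}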
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/WinException.cpp
//===-- CodeGen/AsmPrinter/WinException.cpp - Dwarf Exception Impl ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing Win64 exception info into asm files. // //===----------------------------------------------------------------------===// #include "WinException.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Twine.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/WinEHFuncInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCWin64EH.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetRegisterInfo.h" using namespace llvm; WinException::WinException(AsmPrinter *A) : EHStreamer(A) { // MSVC's EH tables are always composed of 32-bit words. All known 64-bit // platforms use an imagerel32 relocation to refer to symbols. useImageRel32 = (A->getDataLayout().getPointerSizeInBits() == 64); } WinException::~WinException() {} /// endModule - Emit all exception information that should come after the /// content. void WinException::endModule() { auto &OS = *Asm->OutStreamer; const Module *M = MMI->getModule(); for (const Function &F : *M) if (F.hasFnAttribute("safeseh")) OS.EmitCOFFSafeSEH(Asm->getSymbol(&F)); } void WinException::beginFunction(const MachineFunction *MF) { shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false; // If any landing pads survive, we need an EH table. bool hasLandingPads = !MMI->getLandingPads().empty(); const Function *F = MF->getFunction(); const Function *ParentF = MMI->getWinEHParent(F); shouldEmitMoves = Asm->needsSEHMoves(); const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering(); unsigned PerEncoding = TLOF.getPersonalityEncoding(); const Function *Per = nullptr; if (F->hasPersonalityFn()) Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts()); bool forceEmitPersonality = F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) && F->needsUnwindTableEntry(); shouldEmitPersonality = forceEmitPersonality || (hasLandingPads && PerEncoding != dwarf::DW_EH_PE_omit && Per); unsigned LSDAEncoding = TLOF.getLSDAEncoding(); shouldEmitLSDA = shouldEmitPersonality && LSDAEncoding != dwarf::DW_EH_PE_omit; // If we're not using CFI, we don't want the CFI or the personality. If // WinEHPrepare outlined something, we should emit the LSDA. if (!Asm->MAI->usesWindowsCFI()) { bool HasOutlinedChildren = F->hasFnAttribute("wineh-parent") && F == ParentF; shouldEmitLSDA = HasOutlinedChildren; shouldEmitPersonality = false; return; } // If this was an outlined handler, we need to define the label corresponding // to the offset of the parent frame relative to the stack pointer after the // prologue. 
if (F != ParentF) { WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(ParentF); auto I = FuncInfo.CatchHandlerParentFrameObjOffset.find(F); if (I != FuncInfo.CatchHandlerParentFrameObjOffset.end()) { MCSymbol *HandlerTypeParentFrameOffset = Asm->OutContext.getOrCreateParentFrameOffsetSymbol( GlobalValue::getRealLinkageName(F->getName())); // Emit a symbol assignment. Asm->OutStreamer->EmitAssignment( HandlerTypeParentFrameOffset, MCConstantExpr::create(I->second, Asm->OutContext)); } } if (shouldEmitMoves || shouldEmitPersonality) Asm->OutStreamer->EmitWinCFIStartProc(Asm->CurrentFnSym); if (shouldEmitPersonality) { const MCSymbol *PersHandlerSym = TLOF.getCFIPersonalitySymbol(Per, *Asm->Mang, Asm->TM, MMI); Asm->OutStreamer->EmitWinEHHandler(PersHandlerSym, true, true); } } /// endFunction - Gather and emit post-function exception information. /// void WinException::endFunction(const MachineFunction *MF) { if (!shouldEmitPersonality && !shouldEmitMoves && !shouldEmitLSDA) return; const Function *F = MF->getFunction(); EHPersonality Per = EHPersonality::Unknown; if (F->hasPersonalityFn()) Per = classifyEHPersonality(F->getPersonalityFn()); // Get rid of any dead landing pads if we're not using a Windows EH scheme. In // Windows EH schemes, the landing pad is not actually reachable. It only // exists so that we can emit the right table data. if (!isMSVCEHPersonality(Per)) MMI->TidyLandingPads(); if (shouldEmitPersonality || shouldEmitLSDA) { Asm->OutStreamer->PushSection(); if (shouldEmitMoves || shouldEmitPersonality) { // Emit an UNWIND_INFO struct describing the prologue. Asm->OutStreamer->EmitWinEHHandlerData(); } else { // Just switch sections to the right xdata section. This use of // CurrentFnSym assumes that we only emit the LSDA when ending the parent // function. MCSection *XData = WinEH::UnwindEmitter::getXDataSection( Asm->CurrentFnSym, Asm->OutContext); Asm->OutStreamer->SwitchSection(XData); } // Emit the tables appropriate to the personality function in use. If we // don't recognize the personality, assume it uses an Itanium-style LSDA. if (Per == EHPersonality::MSVC_Win64SEH) emitCSpecificHandlerTable(); else if (Per == EHPersonality::MSVC_X86SEH) emitExceptHandlerTable(MF); else if (Per == EHPersonality::MSVC_CXX) emitCXXFrameHandler3Table(MF); else emitExceptionTable(); Asm->OutStreamer->PopSection(); } if (shouldEmitMoves) Asm->OutStreamer->EmitWinCFIEndProc(); } const MCExpr *WinException::create32bitRef(const MCSymbol *Value) { if (!Value) return MCConstantExpr::create(0, Asm->OutContext); return MCSymbolRefExpr::create(Value, useImageRel32 ? MCSymbolRefExpr::VK_COFF_IMGREL32 : MCSymbolRefExpr::VK_None, Asm->OutContext); } const MCExpr *WinException::create32bitRef(const GlobalValue *GV) { if (!GV) return MCConstantExpr::create(0, Asm->OutContext); return create32bitRef(Asm->getSymbol(GV)); } /// Emit the language-specific data that __C_specific_handler expects. This /// handler lives in the x64 Microsoft C runtime and allows catching or cleaning /// up after faults with __try, __except, and __finally. The typeinfo values /// are not really RTTI data, but pointers to filter functions that return an /// integer (1, 0, or -1) indicating how to handle the exception. For __finally /// blocks and other cleanups, the landing pad label is zero, and the filter /// function is actually a cleanup handler with the same prototype. A catch-all /// entry is modeled with a null filter function field and a non-zero landing /// pad label. 
/// /// Possible filter function return values: /// EXCEPTION_EXECUTE_HANDLER (1): /// Jump to the landing pad label after cleanups. /// EXCEPTION_CONTINUE_SEARCH (0): /// Continue searching this table or continue unwinding. /// EXCEPTION_CONTINUE_EXECUTION (-1): /// Resume execution at the trapping PC. /// /// Inferred table structure: /// struct Table { /// int NumEntries; /// struct Entry { /// imagerel32 LabelStart; /// imagerel32 LabelEnd; /// imagerel32 FilterOrFinally; // One means catch-all. /// imagerel32 LabelLPad; // Zero means __finally. /// } Entries[NumEntries]; /// }; void WinException::emitCSpecificHandlerTable() { const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads(); // Simplifying assumptions for first implementation: // - Cleanups are not implemented. // - Filters are not implemented. // The Itanium LSDA table sorts similar landing pads together to simplify the // actions table, but we don't need that. SmallVector<const LandingPadInfo *, 64> LandingPads; LandingPads.reserve(PadInfos.size()); for (const auto &LP : PadInfos) LandingPads.push_back(&LP); // Compute label ranges for call sites as we would for the Itanium LSDA, but // use an all zero action table because we aren't using these actions. SmallVector<unsigned, 64> FirstActions; FirstActions.resize(LandingPads.size()); SmallVector<CallSiteEntry, 64> CallSites; computeCallSiteTable(CallSites, LandingPads, FirstActions); MCSymbol *EHFuncBeginSym = Asm->getFunctionBegin(); MCSymbol *EHFuncEndSym = Asm->getFunctionEnd(); // Emit the number of table entries. unsigned NumEntries = 0; for (const CallSiteEntry &CSE : CallSites) { if (!CSE.LPad) continue; // Ignore gaps. NumEntries += CSE.LPad->SEHHandlers.size(); } Asm->OutStreamer->EmitIntValue(NumEntries, 4); // If there are no actions, we don't need to iterate again. if (NumEntries == 0) return; // Emit the four-label records for each call site entry. The table has to be // sorted in layout order, and the call sites should already be sorted. for (const CallSiteEntry &CSE : CallSites) { // Ignore gaps. Unlike the Itanium model, unwinding through a frame without // an EH table entry will propagate the exception rather than terminating // the program. if (!CSE.LPad) continue; const LandingPadInfo *LPad = CSE.LPad; // Compute the label range. We may reuse the function begin and end labels // rather than forming new ones. const MCExpr *Begin = create32bitRef(CSE.BeginLabel ? CSE.BeginLabel : EHFuncBeginSym); const MCExpr *End; if (CSE.EndLabel) { // The interval is half-open, so we have to add one to include the return // address of the last invoke in the range. End = MCBinaryExpr::createAdd(create32bitRef(CSE.EndLabel), MCConstantExpr::create(1, Asm->OutContext), Asm->OutContext); } else { End = create32bitRef(EHFuncEndSym); } // Emit an entry for each action. for (SEHHandler Handler : LPad->SEHHandlers) { Asm->OutStreamer->EmitValue(Begin, 4); Asm->OutStreamer->EmitValue(End, 4); // Emit the filter or finally function pointer, if present. Otherwise, // emit '1' to indicate a catch-all. const Function *F = Handler.FilterOrFinally; if (F) Asm->OutStreamer->EmitValue(create32bitRef(Asm->getSymbol(F)), 4); else Asm->OutStreamer->EmitIntValue(1, 4); // Emit the recovery address, if present. Otherwise, this must be a // finally. 
const BlockAddress *BA = Handler.RecoverBA; if (BA) Asm->OutStreamer->EmitValue( create32bitRef(Asm->GetBlockAddressSymbol(BA)), 4); else Asm->OutStreamer->EmitIntValue(0, 4); } } } void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) { const Function *F = MF->getFunction(); const Function *ParentF = MMI->getWinEHParent(F); auto &OS = *Asm->OutStreamer; WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(ParentF); StringRef ParentLinkageName = GlobalValue::getRealLinkageName(ParentF->getName()); MCSymbol *FuncInfoXData = nullptr; if (shouldEmitPersonality) { FuncInfoXData = Asm->OutContext.getOrCreateSymbol( Twine("$cppxdata$", ParentLinkageName)); OS.EmitValue(create32bitRef(FuncInfoXData), 4); extendIP2StateTable(MF, ParentF, FuncInfo); // Defer emission until we've visited the parent function and all the catch // handlers. Cleanups don't contribute to the ip2state table, so don't count // them. if (ParentF != F && !FuncInfo.CatchHandlerMaxState.count(F)) return; ++FuncInfo.NumIPToStateFuncsVisited; if (FuncInfo.NumIPToStateFuncsVisited != FuncInfo.CatchHandlerMaxState.size()) return; } else { FuncInfoXData = Asm->OutContext.getOrCreateLSDASymbol(ParentLinkageName); emitEHRegistrationOffsetLabel(FuncInfo, ParentLinkageName); } MCSymbol *UnwindMapXData = nullptr; MCSymbol *TryBlockMapXData = nullptr; MCSymbol *IPToStateXData = nullptr; if (!FuncInfo.UnwindMap.empty()) UnwindMapXData = Asm->OutContext.getOrCreateSymbol( Twine("$stateUnwindMap$", ParentLinkageName)); if (!FuncInfo.TryBlockMap.empty()) TryBlockMapXData = Asm->OutContext.getOrCreateSymbol( Twine("$tryMap$", ParentLinkageName)); if (!FuncInfo.IPToStateList.empty()) IPToStateXData = Asm->OutContext.getOrCreateSymbol( Twine("$ip2state$", ParentLinkageName)); // FuncInfo { // uint32_t MagicNumber // int32_t MaxState; // UnwindMapEntry *UnwindMap; // uint32_t NumTryBlocks; // TryBlockMapEntry *TryBlockMap; // uint32_t IPMapEntries; // always 0 for x86 // IPToStateMapEntry *IPToStateMap; // always 0 for x86 // uint32_t UnwindHelp; // non-x86 only // ESTypeList *ESTypeList; // int32_t EHFlags; // } // EHFlags & 1 -> Synchronous exceptions only, no async exceptions. // EHFlags & 2 -> ??? // EHFlags & 4 -> The function is noexcept(true), unwinding can't continue. 
OS.EmitValueToAlignment(4); OS.EmitLabel(FuncInfoXData); OS.EmitIntValue(0x19930522, 4); // MagicNumber OS.EmitIntValue(FuncInfo.UnwindMap.size(), 4); // MaxState OS.EmitValue(create32bitRef(UnwindMapXData), 4); // UnwindMap OS.EmitIntValue(FuncInfo.TryBlockMap.size(), 4); // NumTryBlocks OS.EmitValue(create32bitRef(TryBlockMapXData), 4); // TryBlockMap OS.EmitIntValue(FuncInfo.IPToStateList.size(), 4); // IPMapEntries OS.EmitValue(create32bitRef(IPToStateXData), 4); // IPToStateMap if (Asm->MAI->usesWindowsCFI()) OS.EmitIntValue(FuncInfo.UnwindHelpFrameOffset, 4); // UnwindHelp OS.EmitIntValue(0, 4); // ESTypeList OS.EmitIntValue(1, 4); // EHFlags // UnwindMapEntry { // int32_t ToState; // void (*Action)(); // }; if (UnwindMapXData) { OS.EmitLabel(UnwindMapXData); for (const WinEHUnwindMapEntry &UME : FuncInfo.UnwindMap) { OS.EmitIntValue(UME.ToState, 4); // ToState OS.EmitValue(create32bitRef(UME.Cleanup), 4); // Action } } // TryBlockMap { // int32_t TryLow; // int32_t TryHigh; // int32_t CatchHigh; // int32_t NumCatches; // HandlerType *HandlerArray; // }; if (TryBlockMapXData) { OS.EmitLabel(TryBlockMapXData); SmallVector<MCSymbol *, 1> HandlerMaps; for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) { WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I]; MCSymbol *HandlerMapXData = nullptr; if (!TBME.HandlerArray.empty()) HandlerMapXData = Asm->OutContext.getOrCreateSymbol(Twine("$handlerMap$") .concat(Twine(I)) .concat("$") .concat(ParentLinkageName)); HandlerMaps.push_back(HandlerMapXData); int CatchHigh = -1; for (WinEHHandlerType &HT : TBME.HandlerArray) CatchHigh = std::max(CatchHigh, FuncInfo.CatchHandlerMaxState[HT.Handler]); assert(TBME.TryLow <= TBME.TryHigh); OS.EmitIntValue(TBME.TryLow, 4); // TryLow OS.EmitIntValue(TBME.TryHigh, 4); // TryHigh OS.EmitIntValue(CatchHigh, 4); // CatchHigh OS.EmitIntValue(TBME.HandlerArray.size(), 4); // NumCatches OS.EmitValue(create32bitRef(HandlerMapXData), 4); // HandlerArray } for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) { WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I]; MCSymbol *HandlerMapXData = HandlerMaps[I]; if (!HandlerMapXData) continue; // HandlerType { // int32_t Adjectives; // TypeDescriptor *Type; // int32_t CatchObjOffset; // void (*Handler)(); // int32_t ParentFrameOffset; // x64 only // }; OS.EmitLabel(HandlerMapXData); for (const WinEHHandlerType &HT : TBME.HandlerArray) { // Get the frame escape label with the offset of the catch object. If // the index is -1, then there is no catch object, and we should emit an // offset of zero, indicating that no copy will occur. 
const MCExpr *FrameAllocOffsetRef = nullptr; if (HT.CatchObjRecoverIdx >= 0) { MCSymbol *FrameAllocOffset = Asm->OutContext.getOrCreateFrameAllocSymbol( GlobalValue::getRealLinkageName(ParentF->getName()), HT.CatchObjRecoverIdx); FrameAllocOffsetRef = MCSymbolRefExpr::create( FrameAllocOffset, MCSymbolRefExpr::VK_None, Asm->OutContext); } else { FrameAllocOffsetRef = MCConstantExpr::create(0, Asm->OutContext); } OS.EmitIntValue(HT.Adjectives, 4); // Adjectives OS.EmitValue(create32bitRef(HT.TypeDescriptor), 4); // Type OS.EmitValue(FrameAllocOffsetRef, 4); // CatchObjOffset OS.EmitValue(create32bitRef(HT.Handler), 4); // Handler if (shouldEmitPersonality) { MCSymbol *ParentFrameOffset = Asm->OutContext.getOrCreateParentFrameOffsetSymbol( GlobalValue::getRealLinkageName(HT.Handler->getName())); const MCSymbolRefExpr *ParentFrameOffsetRef = MCSymbolRefExpr::create( ParentFrameOffset, Asm->OutContext); OS.EmitValue(ParentFrameOffsetRef, 4); // ParentFrameOffset } } } } // IPToStateMapEntry { // void *IP; // int32_t State; // }; if (IPToStateXData) { OS.EmitLabel(IPToStateXData); for (auto &IPStatePair : FuncInfo.IPToStateList) { OS.EmitValue(create32bitRef(IPStatePair.first), 4); // IP OS.EmitIntValue(IPStatePair.second, 4); // State } } } void WinException::extendIP2StateTable(const MachineFunction *MF, const Function *ParentF, WinEHFuncInfo &FuncInfo) { const Function *F = MF->getFunction(); // The Itanium LSDA table sorts similar landing pads together to simplify the // actions table, but we don't need that. SmallVector<const LandingPadInfo *, 64> LandingPads; const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads(); LandingPads.reserve(PadInfos.size()); for (const auto &LP : PadInfos) LandingPads.push_back(&LP); RangeMapType PadMap; computePadMap(LandingPads, PadMap); // The end label of the previous invoke or nounwind try-range. MCSymbol *LastLabel = Asm->getFunctionBegin(); // Whether there is a potentially throwing instruction (currently this means // an ordinary call) between the end of the previous try-range and now. bool SawPotentiallyThrowing = false; int LastEHState = -2; // The parent function and the catch handlers contribute to the 'ip2state' // table. // Include ip2state entries for the beginning of the main function and // for catch handler functions. if (F == ParentF) { FuncInfo.IPToStateList.push_back(std::make_pair(LastLabel, -1)); LastEHState = -1; } else if (FuncInfo.HandlerBaseState.count(F)) { FuncInfo.IPToStateList.push_back( std::make_pair(LastLabel, FuncInfo.HandlerBaseState[F])); LastEHState = FuncInfo.HandlerBaseState[F]; } for (const auto &MBB : *MF) { for (const auto &MI : MBB) { if (!MI.isEHLabel()) { if (MI.isCall()) SawPotentiallyThrowing |= !callToNoUnwindFunction(&MI); continue; } // End of the previous try-range? MCSymbol *BeginLabel = MI.getOperand(0).getMCSymbol(); if (BeginLabel == LastLabel) SawPotentiallyThrowing = false; // Beginning of a new try-range? RangeMapType::const_iterator L = PadMap.find(BeginLabel); if (L == PadMap.end()) // Nope, it was just some random label. continue; const PadRange &P = L->second; const LandingPadInfo *LandingPad = LandingPads[P.PadIndex]; assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] && "Inconsistent landing pad map!"); // FIXME: Should this be using FuncInfo.HandlerBaseState? 
if (SawPotentiallyThrowing && LastEHState != -1) { FuncInfo.IPToStateList.push_back(std::make_pair(LastLabel, -1)); SawPotentiallyThrowing = false; LastEHState = -1; } if (LandingPad->WinEHState != LastEHState) FuncInfo.IPToStateList.push_back( std::make_pair(BeginLabel, LandingPad->WinEHState)); LastEHState = LandingPad->WinEHState; LastLabel = LandingPad->EndLabels[P.RangeIndex]; } } } void WinException::emitEHRegistrationOffsetLabel(const WinEHFuncInfo &FuncInfo, StringRef FLinkageName) { // Outlined helpers called by the EH runtime need to know the offset of the EH // registration in order to recover the parent frame pointer. Now that we know // we've code generated the parent, we can emit the label assignment that // those helpers use to get the offset of the registration node. assert(FuncInfo.EHRegNodeEscapeIndex != INT_MAX && "no EH reg node localescape index"); MCSymbol *ParentFrameOffset = Asm->OutContext.getOrCreateParentFrameOffsetSymbol(FLinkageName); MCSymbol *RegistrationOffsetSym = Asm->OutContext.getOrCreateFrameAllocSymbol( FLinkageName, FuncInfo.EHRegNodeEscapeIndex); const MCExpr *RegistrationOffsetSymRef = MCSymbolRefExpr::create(RegistrationOffsetSym, Asm->OutContext); Asm->OutStreamer->EmitAssignment(ParentFrameOffset, RegistrationOffsetSymRef); } /// Emit the language-specific data that _except_handler3 and 4 expect. This is /// functionally equivalent to the __C_specific_handler table, except it is /// indexed by state number instead of IP. void WinException::emitExceptHandlerTable(const MachineFunction *MF) { MCStreamer &OS = *Asm->OutStreamer; const Function *F = MF->getFunction(); StringRef FLinkageName = GlobalValue::getRealLinkageName(F->getName()); WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(F); emitEHRegistrationOffsetLabel(FuncInfo, FLinkageName); // Emit the __ehtable label that we use for llvm.x86.seh.lsda. MCSymbol *LSDALabel = Asm->OutContext.getOrCreateLSDASymbol(FLinkageName); OS.EmitValueToAlignment(4); OS.EmitLabel(LSDALabel); const Function *Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts()); StringRef PerName = Per->getName(); int BaseState = -1; if (PerName == "_except_handler4") { // The LSDA for _except_handler4 starts with this struct, followed by the // scope table: // // struct EH4ScopeTable { // int32_t GSCookieOffset; // int32_t GSCookieXOROffset; // int32_t EHCookieOffset; // int32_t EHCookieXOROffset; // ScopeTableEntry ScopeRecord[]; // }; // // Only the EHCookieOffset field appears to vary, and it appears to be the // offset from the final saved SP value to the retaddr. OS.EmitIntValue(-2, 4); OS.EmitIntValue(0, 4); // FIXME: Calculate. OS.EmitIntValue(9999, 4); OS.EmitIntValue(0, 4); BaseState = -2; } // Build a list of pointers to LandingPadInfos and then sort by WinEHState. const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads(); SmallVector<const LandingPadInfo *, 4> LPads; LPads.reserve((PadInfos.size())); for (const LandingPadInfo &LPInfo : PadInfos) LPads.push_back(&LPInfo); std::sort(LPads.begin(), LPads.end(), [](const LandingPadInfo *L, const LandingPadInfo *R) { return L->WinEHState < R->WinEHState; }); // For each action in each lpad, emit one of these: // struct ScopeTableEntry { // int32_t EnclosingLevel; // int32_t (__cdecl *Filter)(); // void *HandlerOrFinally; // }; // // The "outermost" action will use BaseState as its enclosing level. Each // other action will refer to the previous state as its enclosing level. 
int CurState = 0; for (const LandingPadInfo *LPInfo : LPads) { int EnclosingLevel = BaseState; assert(CurState + int(LPInfo->SEHHandlers.size()) - 1 == LPInfo->WinEHState && "gaps in the SEH scope table"); for (auto I = LPInfo->SEHHandlers.rbegin(), E = LPInfo->SEHHandlers.rend(); I != E; ++I) { const SEHHandler &Handler = *I; const BlockAddress *BA = Handler.RecoverBA; const Function *F = Handler.FilterOrFinally; assert(F && "cannot catch all in 32-bit SEH without filter function"); const MCExpr *FilterOrNull = create32bitRef(BA ? Asm->getSymbol(F) : nullptr); const MCExpr *ExceptOrFinally = create32bitRef( BA ? Asm->GetBlockAddressSymbol(BA) : Asm->getSymbol(F)); OS.EmitIntValue(EnclosingLevel, 4); OS.EmitValue(FilterOrNull, 4); OS.EmitValue(ExceptOrFinally, 4); // The next state unwinds to this state. EnclosingLevel = CurState; CurState++; } } }
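The comment block before emitCSpecificHandlerTable() above documents the table that __C_specific_handler consumes. A minimal sketch of that layout as plain structs, modeling each imagerel32 field as a 32-bit integer (the struct names are illustrative):

#include <cstdint>

struct SEHTableEntry {
  uint32_t LabelStart;      // imagerel32 of the protected region start
  uint32_t LabelEnd;        // imagerel32 one past the end of the region
  uint32_t FilterOrFinally; // imagerel32 of the filter/finally, or 1 for catch-all
  uint32_t LabelLPad;       // imagerel32 of the landing pad, or 0 for __finally
};

struct SEHTable {
  uint32_t NumEntries;
  // SEHTableEntry Entries[NumEntries]; // trailing array in the emitted image
};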
0
repos/DirectXShaderCompiler/lib/CodeGen
repos/DirectXShaderCompiler/lib/CodeGen/AsmPrinter/DwarfException.h
//===-- DwarfException.h - Dwarf Exception Framework -----------*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing dwarf exception info into asm files. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H #define LLVM_LIB_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H #include "EHStreamer.h" #include "llvm/CodeGen/AsmPrinter.h" namespace llvm { class MachineFunction; class ARMTargetStreamer; class LLVM_LIBRARY_VISIBILITY DwarfCFIExceptionBase : public EHStreamer { protected: DwarfCFIExceptionBase(AsmPrinter *A); /// Per-function flag to indicate if frame CFI info should be emitted. bool shouldEmitCFI; void markFunctionEnd() override; }; class LLVM_LIBRARY_VISIBILITY DwarfCFIException : public DwarfCFIExceptionBase { /// Per-function flag to indicate if .cfi_personality should be emitted. bool shouldEmitPersonality; /// Per-function flag to indicate if .cfi_lsda should be emitted. bool shouldEmitLSDA; /// Per-function flag to indicate if frame moves info should be emitted. bool shouldEmitMoves; AsmPrinter::CFIMoveType moveTypeModule; public: //===--------------------------------------------------------------------===// // Main entry points. // DwarfCFIException(AsmPrinter *A); ~DwarfCFIException() override; /// Emit all exception information that should come after the content. void endModule() override; /// Gather pre-function exception information. Assumes being emitted /// immediately after the function entry point. void beginFunction(const MachineFunction *MF) override; /// Gather and emit post-function exception information. void endFunction(const MachineFunction *) override; }; class LLVM_LIBRARY_VISIBILITY ARMException : public DwarfCFIExceptionBase { void emitTypeInfos(unsigned TTypeEncoding) override; ARMTargetStreamer &getTargetStreamer(); public: //===--------------------------------------------------------------------===// // Main entry points. // ARMException(AsmPrinter *A); ~ARMException() override; /// Emit all exception information that should come after the content. void endModule() override; /// Gather pre-function exception information. Assumes being emitted /// immediately after the function entry point. void beginFunction(const MachineFunction *MF) override; /// Gather and emit post-function exception information. void endFunction(const MachineFunction *) override; }; } // End of namespace llvm #endif
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerInterface.h
//===- FuzzerInterface.h - Interface header for the Fuzzer ------*- C++ -* ===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Define the interface between the Fuzzer and the library being tested. //===----------------------------------------------------------------------===// // WARNING: keep the interface free of STL or any other header-based C++ lib, // to avoid bad interactions between the code used in the fuzzer and // the code used in the target function. #ifndef LLVM_FUZZER_INTERFACE_H #define LLVM_FUZZER_INTERFACE_H #include <cstddef> #include <cstdint> namespace fuzzer { typedef void (*UserCallback)(const uint8_t *Data, size_t Size); /** Simple C-like interface with a single user-supplied callback. Usage: #\code #include "FuzzerInterface.h" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { DoStuffWithData(Data, Size); } // Implement your own main() or use the one from FuzzerMain.cpp. int main(int argc, char **argv) { InitializeMeIfNeeded(); return fuzzer::FuzzerDriver(argc, argv, LLVMFuzzerTestOneInput); } #\endcode */ int FuzzerDriver(int argc, char **argv, UserCallback Callback); /** An abstract class that allows to use user-supplied mutators with libFuzzer. Usage: #\code #include "FuzzerInterface.h" class MyFuzzer : public fuzzer::UserSuppliedFuzzer { public: // Must define the target function. void TargetFunction(...) { ... } // Optionally define the mutator. size_t Mutate(...) { ... } // Optionally define the CrossOver method. size_t CrossOver(...) { ... } }; int main(int argc, char **argv) { MyFuzzer F; fuzzer::FuzzerDriver(argc, argv, F); } #\endcode */ class UserSuppliedFuzzer { public: /// Executes the target function on 'Size' bytes of 'Data'. virtual void TargetFunction(const uint8_t *Data, size_t Size) = 0; /// Mutates 'Size' bytes of data in 'Data' inplace into up to 'MaxSize' bytes, /// returns the new size of the data, which should be positive. virtual size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize) { return BasicMutate(Data, Size, MaxSize); } /// Crosses 'Data1' and 'Data2', writes up to 'MaxOutSize' bytes into Out, /// returns the number of bytes written, which should be positive. virtual size_t CrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2, size_t Size2, uint8_t *Out, size_t MaxOutSize) { return BasicCrossOver(Data1, Size1, Data2, Size2, Out, MaxOutSize); } virtual ~UserSuppliedFuzzer() {} protected: /// These can be called internally by Mutate and CrossOver. size_t BasicMutate(uint8_t *Data, size_t Size, size_t MaxSize); size_t BasicCrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2, size_t Size2, uint8_t *Out, size_t MaxOutSize); }; /// Runs the fuzzing with the UserSuppliedFuzzer. int FuzzerDriver(int argc, char **argv, UserSuppliedFuzzer &USF); } // namespace fuzzer #endif // LLVM_FUZZER_INTERFACE_H
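FuzzerInterface.h above documents the simple callback-based entry point. A minimal fuzz target written to that interface, following the header's own usage sketch (DoStuffWithData stands in for the code under test; the extern "C" declaration matches what FuzzerMain.cpp expects):

#include <cstddef>
#include <cstdint>

// Hypothetical code under test.
static void DoStuffWithData(const uint8_t *Data, size_t Size) {
  if (Size == 0)
    return;
  volatile uint8_t First = Data[0]; // touch the input so it is actually read
  (void)First;
}

extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  DoStuffWithData(Data, Size);
}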
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerSanitizerOptions.cpp
//===- FuzzerSanitizerOptions.cpp - default flags for sanitizers ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Set default options for sanitizers while running the fuzzer. // Options reside in a separate file, so if we don't want to set the default // options we simply do not link this file in. // ASAN options: // * don't dump the coverage to disk. // * enable coverage by default. // * enable handle_abort. //===----------------------------------------------------------------------===// extern "C" const char *__asan_default_options() { return "coverage_pcs=0:coverage=1:handle_abort=1"; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerMain.cpp
//===- FuzzerMain.cpp - main() function and flags -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // main() and flags. //===----------------------------------------------------------------------===// #include "FuzzerInterface.h" #include "FuzzerInternal.h" // This function should be defined by the user. extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); int main(int argc, char **argv) { return fuzzer::FuzzerDriver(argc, argv, LLVMFuzzerTestOneInput); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerDriver.cpp
//===- FuzzerDriver.cpp - FuzzerDriver function and flags -----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // FuzzerDriver and flag parsing. //===----------------------------------------------------------------------===// #include "FuzzerInterface.h" #include "FuzzerInternal.h" #include <cstring> #include <chrono> #include <unistd.h> #include <thread> #include <atomic> #include <mutex> #include <string> #include <sstream> #include <algorithm> #include <iterator> namespace fuzzer { // Program arguments. struct FlagDescription { const char *Name; const char *Description; int Default; int *IntFlag; const char **StrFlag; }; struct { #define FUZZER_FLAG_INT(Name, Default, Description) int Name; #define FUZZER_FLAG_STRING(Name, Description) const char *Name; #include "FuzzerFlags.def" #undef FUZZER_FLAG_INT #undef FUZZER_FLAG_STRING } Flags; static FlagDescription FlagDescriptions [] { #define FUZZER_FLAG_INT(Name, Default, Description) \ { #Name, Description, Default, &Flags.Name, nullptr}, #define FUZZER_FLAG_STRING(Name, Description) \ { #Name, Description, 0, nullptr, &Flags.Name }, #include "FuzzerFlags.def" #undef FUZZER_FLAG_INT #undef FUZZER_FLAG_STRING }; static const size_t kNumFlags = sizeof(FlagDescriptions) / sizeof(FlagDescriptions[0]); static std::vector<std::string> inputs; static const char *ProgName; static void PrintHelp() { Printf("Usage: %s [-flag1=val1 [-flag2=val2 ...] ] [dir1 [dir2 ...] ]\n", ProgName); Printf("\nFlags: (strictly in form -flag=value)\n"); size_t MaxFlagLen = 0; for (size_t F = 0; F < kNumFlags; F++) MaxFlagLen = std::max(strlen(FlagDescriptions[F].Name), MaxFlagLen); for (size_t F = 0; F < kNumFlags; F++) { const auto &D = FlagDescriptions[F]; Printf(" %s", D.Name); for (size_t i = 0, n = MaxFlagLen - strlen(D.Name); i < n; i++) Printf(" "); Printf("\t"); Printf("%d\t%s\n", D.Default, D.Description); } Printf("\nFlags starting with '--' will be ignored and " "will be passed verbatim to subprocesses.\n"); } static const char *FlagValue(const char *Param, const char *Name) { size_t Len = strlen(Name); if (Param[0] == '-' && strstr(Param + 1, Name) == Param + 1 && Param[Len + 1] == '=') return &Param[Len + 2]; return nullptr; } static bool ParseOneFlag(const char *Param) { if (Param[0] != '-') return false; if (Param[1] == '-') { static bool PrintedWarning = false; if (!PrintedWarning) { PrintedWarning = true; Printf("WARNING: libFuzzer ignores flags that start with '--'\n"); } return true; } for (size_t F = 0; F < kNumFlags; F++) { const char *Name = FlagDescriptions[F].Name; const char *Str = FlagValue(Param, Name); if (Str) { if (FlagDescriptions[F].IntFlag) { int Val = std::stol(Str); *FlagDescriptions[F].IntFlag = Val; if (Flags.verbosity >= 2) Printf("Flag: %s %d\n", Name, Val);; return true; } else if (FlagDescriptions[F].StrFlag) { *FlagDescriptions[F].StrFlag = Str; if (Flags.verbosity >= 2) Printf("Flag: %s %s\n", Name, Str); return true; } } } PrintHelp(); exit(1); } // We don't use any library to minimize dependencies. 
static void ParseFlags(int argc, char **argv) { for (size_t F = 0; F < kNumFlags; F++) { if (FlagDescriptions[F].IntFlag) *FlagDescriptions[F].IntFlag = FlagDescriptions[F].Default; if (FlagDescriptions[F].StrFlag) *FlagDescriptions[F].StrFlag = nullptr; } for (int A = 1; A < argc; A++) { if (ParseOneFlag(argv[A])) continue; inputs.push_back(argv[A]); } } static std::mutex Mu; static void PulseThread() { while (true) { std::this_thread::sleep_for(std::chrono::seconds(600)); std::lock_guard<std::mutex> Lock(Mu); Printf("pulse...\n"); } } static void WorkerThread(const std::string &Cmd, std::atomic<int> *Counter, int NumJobs, std::atomic<bool> *HasErrors) { while (true) { int C = (*Counter)++; if (C >= NumJobs) break; std::string Log = "fuzz-" + std::to_string(C) + ".log"; std::string ToRun = Cmd + " > " + Log + " 2>&1\n"; if (Flags.verbosity) Printf("%s", ToRun.c_str()); int ExitCode = system(ToRun.c_str()); if (ExitCode != 0) *HasErrors = true; std::lock_guard<std::mutex> Lock(Mu); Printf("================== Job %d exited with exit code %d ============\n", C, ExitCode); fuzzer::CopyFileToErr(Log); } } static int RunInMultipleProcesses(int argc, char **argv, int NumWorkers, int NumJobs) { std::atomic<int> Counter(0); std::atomic<bool> HasErrors(false); std::string Cmd; for (int i = 0; i < argc; i++) { if (FlagValue(argv[i], "jobs") || FlagValue(argv[i], "workers")) continue; Cmd += argv[i]; Cmd += " "; } std::vector<std::thread> V; std::thread Pulse(PulseThread); Pulse.detach(); for (int i = 0; i < NumWorkers; i++) V.push_back(std::thread(WorkerThread, Cmd, &Counter, NumJobs, &HasErrors)); for (auto &T : V) T.join(); return HasErrors ? 1 : 0; } std::vector<std::string> ReadTokensFile(const char *TokensFilePath) { if (!TokensFilePath) return {}; std::string TokensFileContents = FileToString(TokensFilePath); std::istringstream ISS(TokensFileContents); std::vector<std::string> Res = {std::istream_iterator<std::string>{ISS}, std::istream_iterator<std::string>{}}; Res.push_back(" "); Res.push_back("\t"); Res.push_back("\n"); return Res; } int ApplyTokens(const Fuzzer &F, const char *InputFilePath) { Unit U = FileToVector(InputFilePath); auto T = F.SubstituteTokens(U); T.push_back(0); Printf("%s", T.data()); return 0; } int FuzzerDriver(int argc, char **argv, UserCallback Callback) { SimpleUserSuppliedFuzzer SUSF(Callback); return FuzzerDriver(argc, argv, SUSF); } int FuzzerDriver(int argc, char **argv, UserSuppliedFuzzer &USF) { using namespace fuzzer; ProgName = argv[0]; ParseFlags(argc, argv); if (Flags.help) { PrintHelp(); return 0; } if (Flags.jobs > 0 && Flags.workers == 0) { Flags.workers = std::min(NumberOfCpuCores() / 2, Flags.jobs); if (Flags.workers > 1) Printf("Running %d workers\n", Flags.workers); } if (Flags.workers > 0 && Flags.jobs > 0) return RunInMultipleProcesses(argc, argv, Flags.workers, Flags.jobs); Fuzzer::FuzzingOptions Options; Options.Verbosity = Flags.verbosity; Options.MaxLen = Flags.max_len; Options.UnitTimeoutSec = Flags.timeout; Options.DoCrossOver = Flags.cross_over; Options.MutateDepth = Flags.mutate_depth; Options.ExitOnFirst = Flags.exit_on_first; Options.UseCounters = Flags.use_counters; Options.UseTraces = Flags.use_traces; Options.UseFullCoverageSet = Flags.use_full_coverage_set; Options.PreferSmallDuringInitialShuffle = Flags.prefer_small_during_initial_shuffle; Options.Tokens = ReadTokensFile(Flags.tokens); Options.Reload = Flags.reload; if (Flags.runs >= 0) Options.MaxNumberOfRuns = Flags.runs; if (!inputs.empty()) Options.OutputCorpus = inputs[0]; if 
(Flags.sync_command) Options.SyncCommand = Flags.sync_command; Options.SyncTimeout = Flags.sync_timeout; Fuzzer F(USF, Options); if (Flags.apply_tokens) return ApplyTokens(F, Flags.apply_tokens); unsigned Seed = Flags.seed; // Initialize Seed. if (Seed == 0) Seed = time(0) * 10000 + getpid(); if (Flags.verbosity) Printf("Seed: %u\n", Seed); srand(Seed); // Timer if (Flags.timeout > 0) SetTimer(Flags.timeout / 2 + 1); if (Flags.verbosity >= 2) { Printf("Tokens: {"); for (auto &T : Options.Tokens) Printf("%s,", T.c_str()); Printf("}\n"); } F.RereadOutputCorpus(); for (auto &inp : inputs) if (inp != Options.OutputCorpus) F.ReadDir(inp, nullptr); if (F.CorpusSize() == 0) F.AddToCorpus(Unit()); // Can't fuzz empty corpus, so add an empty input. F.ShuffleAndMinimize(); if (Flags.save_minimized_corpus) F.SaveCorpus(); F.Loop(Flags.iterations < 0 ? INT_MAX : Flags.iterations); if (Flags.verbosity) Printf("Done %d runs in %zd second(s)\n", F.getTotalNumberOfRuns(), F.secondsSinceProcessStartUp()); return 0; } } // namespace fuzzer
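An illustrative aside on how the driver above is typically wired up: FuzzerDriver(argc, argv, Callback) expects a target with the LLVMFuzzerTestOneInput signature (the test files later in this listing use exactly that form), and a tiny main() hands the command line to it, which is what FuzzerMain.cpp does for this library. A minimal sketch, assuming the libFuzzer sources above are compiled in and FuzzerInterface.h is on the include path; the target body here is made up purely for illustration:

// Illustrative only: a minimal fuzz target plus a main() that hands control
// to fuzzer::FuzzerDriver. Flags such as -max_len=64 or -timeout=10 are parsed
// by ParseFlags above; non-flag arguments are treated as corpus directories.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include "FuzzerInterface.h"

// The function the fuzzer calls for every generated input.
extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  if (Size >= 3 && Data[0] == 'b' && Data[1] == 'u' && Data[2] == 'g') {
    fprintf(stderr, "BINGO\n");
    exit(1);
  }
}

int main(int argc, char **argv) {
  return fuzzer::FuzzerDriver(argc, argv, LLVMFuzzerTestOneInput);
}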
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerMutate.cpp
//===- FuzzerMutate.cpp - Mutate a test input -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Mutate a test input.
//===----------------------------------------------------------------------===//

#include <cstring>
#include "FuzzerInternal.h"

namespace fuzzer {

static char FlipRandomBit(char X) {
  int Bit = rand() % 8;
  char Mask = 1 << Bit;
  char R;
  if (X & (1 << Bit))
    R = X & ~Mask;
  else
    R = X | Mask;
  assert(R != X);
  return R;
}

static char RandCh() {
  if (rand() % 2) return rand();
  const char *Special = "!*'();:@&=+$,/?%#[]123ABCxyz-`~.";
  return Special[rand() % (sizeof(Special) - 1)];
}

// Mutates Data in place, returns new size.
size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
  assert(MaxSize > 0);
  assert(Size <= MaxSize);
  if (Size == 0) {
    for (size_t i = 0; i < MaxSize; i++)
      Data[i] = RandCh();
    return MaxSize;
  }
  assert(Size > 0);
  size_t Idx = rand() % Size;
  switch (rand() % 3) {
  case 0:
    if (Size > 1) {  // Erase Data[Idx].
      memmove(Data + Idx, Data + Idx + 1, Size - Idx - 1);
      Size = Size - 1;
    }
    [[clang::fallthrough]];
  case 1:
    if (Size < MaxSize) {  // Insert new value at Data[Idx].
      memmove(Data + Idx + 1, Data + Idx, Size - Idx);
      Size = Size + 1;  // Grow the unit to account for the inserted byte.
      Data[Idx] = RandCh();
    }
    Data[Idx] = RandCh();
    break;
  case 2:
    Data[Idx] = FlipRandomBit(Data[Idx]);
    break;
  }
  assert(Size > 0);
  return Size;
}

}  // namespace fuzzer
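An illustrative aside: Mutate() above edits the buffer in place with one of three single-byte operations (erase, insert, or bit flip) and returns the new size, never exceeding MaxSize. A minimal usage sketch, assuming the file above is linked in; the seed and starting bytes are arbitrary:

// Illustrative only: drive fuzzer::Mutate on a small buffer and print each step.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

namespace fuzzer { size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize); }

int main() {
  srand(1);                              // Mutate uses rand() internally.
  const size_t MaxSize = 8;
  std::vector<uint8_t> Buf = {'a', 'b', 'c'};
  size_t Size = Buf.size();
  Buf.resize(MaxSize);                   // Reserve room for insertions.
  for (int Step = 0; Step < 5; Step++) {
    Size = fuzzer::Mutate(Buf.data(), Size, MaxSize);
    printf("step %d size %zu:", Step, Size);
    for (size_t i = 0; i < Size; i++) printf(" %02x", (unsigned)Buf[i]);
    printf("\n");
  }
}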
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerIO.cpp
//===- FuzzerIO.cpp - IO utils. -------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // IO functions. //===----------------------------------------------------------------------===// #include "FuzzerInternal.h" #include <iterator> #include <fstream> #include <dirent.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <cstdio> // // /////////////////////////////////////////////////////////////////////////////// namespace fuzzer { static long GetEpoch(const std::string &Path) { struct stat St; if (stat(Path.c_str(), &St)) return 0; return St.st_mtime; } static std::vector<std::string> ListFilesInDir(const std::string &Dir, long *Epoch) { std::vector<std::string> V; if (Epoch) { auto E = GetEpoch(Dir.c_str()); if (*Epoch >= E) return V; *Epoch = E; } DIR *D = opendir(Dir.c_str()); if (!D) return V; while (auto E = readdir(D)) { if (E->d_type == DT_REG || E->d_type == DT_LNK) V.push_back(E->d_name); } closedir(D); return V; } Unit FileToVector(const std::string &Path) { std::ifstream T(Path); return Unit((std::istreambuf_iterator<char>(T)), std::istreambuf_iterator<char>()); } std::string FileToString(const std::string &Path) { std::ifstream T(Path); return std::string((std::istreambuf_iterator<char>(T)), std::istreambuf_iterator<char>()); } void CopyFileToErr(const std::string &Path) { Printf("%s", FileToString(Path).c_str()); } void WriteToFile(const Unit &U, const std::string &Path) { std::ofstream OF(Path); OF.write((const char*)U.data(), U.size()); } void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V, long *Epoch) { long E = Epoch ? *Epoch : 0; for (auto &X : ListFilesInDir(Path, Epoch)) { auto FilePath = DirPlusFile(Path, X); if (Epoch && GetEpoch(FilePath) < E) continue; V->push_back(FileToVector(FilePath)); } } std::string DirPlusFile(const std::string &DirPath, const std::string &FileName) { return DirPath + "/" + FileName; } void PrintFileAsBase64(const std::string &Path) { std::string Cmd = "base64 -w 0 < " + Path + "; echo"; ExecuteCommand(Cmd); } void Printf(const char *Fmt, ...) { va_list ap; va_start(ap, Fmt); vfprintf(stderr, Fmt, ap); va_end(ap); } } // namespace fuzzer
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerSHA1.cpp
//===- FuzzerSHA1.h - Private copy of the SHA1 implementation ---*- C++ -* ===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // This code is taken from public domain // (http://oauth.googlecode.com/svn/code/c/liboauth/src/sha1.c) // and modified by adding anonymous namespace, adding an interface // function fuzzer::ComputeSHA1() and removing unnecessary code. // // lib/Fuzzer can not use SHA1 implementation from openssl because // openssl may not be available and because we may be fuzzing openssl itself. // For the same reason we do not want to depend on SHA1 from LLVM tree. //===----------------------------------------------------------------------===// #include "FuzzerInternal.h" /* This code is public-domain - it is based on libcrypt * placed in the public domain by Wei Dai and other contributors. */ #include <stdint.h> #include <string.h> namespace { // Added for LibFuzzer #ifdef __BIG_ENDIAN__ # define SHA_BIG_ENDIAN #elif defined __LITTLE_ENDIAN__ /* override */ #elif defined __BYTE_ORDER # if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define SHA_BIG_ENDIAN # endif #else // ! defined __LITTLE_ENDIAN__ # include <endian.h> // machine/endian.h # if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define SHA_BIG_ENDIAN # endif #endif /* header */ #define HASH_LENGTH 20 #define BLOCK_LENGTH 64 typedef struct sha1nfo { uint32_t buffer[BLOCK_LENGTH/4]; uint32_t state[HASH_LENGTH/4]; uint32_t byteCount; uint8_t bufferOffset; uint8_t keyBuffer[BLOCK_LENGTH]; uint8_t innerHash[HASH_LENGTH]; } sha1nfo; /* public API - prototypes - TODO: doxygen*/ /** */ void sha1_init(sha1nfo *s); /** */ void sha1_writebyte(sha1nfo *s, uint8_t data); /** */ void sha1_write(sha1nfo *s, const char *data, size_t len); /** */ uint8_t* sha1_result(sha1nfo *s); /* code */ #define SHA1_K0 0x5a827999 #define SHA1_K20 0x6ed9eba1 #define SHA1_K40 0x8f1bbcdc #define SHA1_K60 0xca62c1d6 void sha1_init(sha1nfo *s) { s->state[0] = 0x67452301; s->state[1] = 0xefcdab89; s->state[2] = 0x98badcfe; s->state[3] = 0x10325476; s->state[4] = 0xc3d2e1f0; s->byteCount = 0; s->bufferOffset = 0; } uint32_t sha1_rol32(uint32_t number, uint8_t bits) { return ((number << bits) | (number >> (32-bits))); } void sha1_hashBlock(sha1nfo *s) { uint8_t i; uint32_t a,b,c,d,e,t; a=s->state[0]; b=s->state[1]; c=s->state[2]; d=s->state[3]; e=s->state[4]; for (i=0; i<80; i++) { if (i>=16) { t = s->buffer[(i+13)&15] ^ s->buffer[(i+8)&15] ^ s->buffer[(i+2)&15] ^ s->buffer[i&15]; s->buffer[i&15] = sha1_rol32(t,1); } if (i<20) { t = (d ^ (b & (c ^ d))) + SHA1_K0; } else if (i<40) { t = (b ^ c ^ d) + SHA1_K20; } else if (i<60) { t = ((b & c) | (d & (b | c))) + SHA1_K40; } else { t = (b ^ c ^ d) + SHA1_K60; } t+=sha1_rol32(a,5) + e + s->buffer[i&15]; e=d; d=c; c=sha1_rol32(b,30); b=a; a=t; } s->state[0] += a; s->state[1] += b; s->state[2] += c; s->state[3] += d; s->state[4] += e; } void sha1_addUncounted(sha1nfo *s, uint8_t data) { uint8_t * const b = (uint8_t*) s->buffer; #ifdef SHA_BIG_ENDIAN b[s->bufferOffset] = data; #else b[s->bufferOffset ^ 3] = data; #endif s->bufferOffset++; if (s->bufferOffset == BLOCK_LENGTH) { sha1_hashBlock(s); s->bufferOffset = 0; } } void sha1_writebyte(sha1nfo *s, uint8_t data) { ++s->byteCount; sha1_addUncounted(s, data); } void sha1_write(sha1nfo *s, const char *data, size_t len) { for (;len--;) sha1_writebyte(s, (uint8_t) *data++); } void 
sha1_pad(sha1nfo *s) { // Implement SHA-1 padding (fips180-2 §5.1.1) // Pad with 0x80 followed by 0x00 until the end of the block sha1_addUncounted(s, 0x80); while (s->bufferOffset != 56) sha1_addUncounted(s, 0x00); // Append length in the last 8 bytes sha1_addUncounted(s, 0); // We're only using 32 bit lengths sha1_addUncounted(s, 0); // But SHA-1 supports 64 bit lengths sha1_addUncounted(s, 0); // So zero pad the top bits sha1_addUncounted(s, s->byteCount >> 29); // Shifting to multiply by 8 sha1_addUncounted(s, s->byteCount >> 21); // as SHA-1 supports bitstreams as well as sha1_addUncounted(s, s->byteCount >> 13); // byte. sha1_addUncounted(s, s->byteCount >> 5); sha1_addUncounted(s, s->byteCount << 3); } uint8_t* sha1_result(sha1nfo *s) { // Pad to complete the last block sha1_pad(s); #ifndef SHA_BIG_ENDIAN // Swap byte order back int i; for (i=0; i<5; i++) { s->state[i]= (((s->state[i])<<24)& 0xff000000) | (((s->state[i])<<8) & 0x00ff0000) | (((s->state[i])>>8) & 0x0000ff00) | (((s->state[i])>>24)& 0x000000ff); } #endif // Return pointer to hash (20 characters) return (uint8_t*) s->state; } } // namespace; Added for LibFuzzer // The rest is added for LibFuzzer void fuzzer::ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out) { sha1nfo s; sha1_init(&s); sha1_write(&s, (const char*)Data, Len); memcpy(Out, sha1_result(&s), HASH_LENGTH); }
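An illustrative aside: a quick way to sanity-check the private SHA-1 copy above is the standard test vector for "abc", whose digest a9993e364706816aba3e25717850c26c9cd0d89d is also what the unit test later in this listing expects from fuzzer::Hash. A minimal sketch, assuming the file above is linked in:

// Illustrative only: hash "abc" with fuzzer::ComputeSHA1 and print the hex digest.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

namespace fuzzer { void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out); }

int main() {
  const char *Msg = "abc";
  uint8_t Digest[20];
  fuzzer::ComputeSHA1(reinterpret_cast<const uint8_t *>(Msg), strlen(Msg), Digest);
  for (int i = 0; i < 20; i++) printf("%02x", (unsigned)Digest[i]);
  printf("\n");  // Expected: a9993e364706816aba3e25717850c26c9cd0d89d
}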
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerLoop.cpp
//===- FuzzerLoop.cpp - Fuzzer's main loop --------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Fuzzer's main loop. //===----------------------------------------------------------------------===// #include "FuzzerInternal.h" #include <sanitizer/coverage_interface.h> #include <algorithm> namespace fuzzer { // Only one Fuzzer per process. static Fuzzer *F; Fuzzer::Fuzzer(UserSuppliedFuzzer &USF, FuzzingOptions Options) : USF(USF), Options(Options) { SetDeathCallback(); InitializeTraceState(); assert(!F); F = this; } void Fuzzer::SetDeathCallback() { __sanitizer_set_death_callback(StaticDeathCallback); } void Fuzzer::PrintUnitInASCIIOrTokens(const Unit &U, const char *PrintAfter) { if (Options.Tokens.empty()) { PrintASCII(U, PrintAfter); } else { auto T = SubstituteTokens(U); T.push_back(0); Printf("%s%s", T.data(), PrintAfter); } } void Fuzzer::StaticDeathCallback() { assert(F); F->DeathCallback(); } void Fuzzer::DeathCallback() { Printf("DEATH:\n"); Print(CurrentUnit, "\n"); PrintUnitInASCIIOrTokens(CurrentUnit, "\n"); WriteToCrash(CurrentUnit, "crash-"); } void Fuzzer::StaticAlarmCallback() { assert(F); F->AlarmCallback(); } void Fuzzer::AlarmCallback() { assert(Options.UnitTimeoutSec > 0); size_t Seconds = duration_cast<seconds>(system_clock::now() - UnitStartTime).count(); if (Seconds == 0) return; if (Options.Verbosity >= 2) Printf("AlarmCallback %zd\n", Seconds); if (Seconds >= (size_t)Options.UnitTimeoutSec) { Printf("ALARM: working on the last Unit for %zd seconds\n", Seconds); Printf(" and the timeout value is %d (use -timeout=N to change)\n", Options.UnitTimeoutSec); Print(CurrentUnit, "\n"); PrintUnitInASCIIOrTokens(CurrentUnit, "\n"); WriteToCrash(CurrentUnit, "timeout-"); exit(1); } } void Fuzzer::PrintStats(const char *Where, size_t Cov, const char *End) { if (!Options.Verbosity) return; size_t Seconds = secondsSinceProcessStartUp(); size_t ExecPerSec = (Seconds ? 
TotalNumberOfRuns / Seconds : 0); Printf("#%zd\t%s cov %zd bits %zd units %zd exec/s %zd %s", TotalNumberOfRuns, Where, Cov, TotalBits(), Corpus.size(), ExecPerSec, End); } void Fuzzer::RereadOutputCorpus() { if (Options.OutputCorpus.empty()) return; std::vector<Unit> AdditionalCorpus; ReadDirToVectorOfUnits(Options.OutputCorpus.c_str(), &AdditionalCorpus, &EpochOfLastReadOfOutputCorpus); if (Corpus.empty()) { Corpus = AdditionalCorpus; return; } if (!Options.Reload) return; if (Options.Verbosity >= 2) Printf("Reload: read %zd new units.\n", AdditionalCorpus.size()); for (auto &X : AdditionalCorpus) { if (X.size() > (size_t)Options.MaxLen) X.resize(Options.MaxLen); if (UnitHashesAddedToCorpus.insert(Hash(X)).second) { CurrentUnit.clear(); CurrentUnit.insert(CurrentUnit.begin(), X.begin(), X.end()); size_t NewCoverage = RunOne(CurrentUnit); if (NewCoverage) { Corpus.push_back(X); if (Options.Verbosity >= 1) PrintStats("RELOAD", NewCoverage); } } } } void Fuzzer::ShuffleAndMinimize() { size_t MaxCov = 0; bool PreferSmall = (Options.PreferSmallDuringInitialShuffle == 1 || (Options.PreferSmallDuringInitialShuffle == -1 && rand() % 2)); if (Options.Verbosity) Printf("PreferSmall: %d\n", PreferSmall); PrintStats("READ ", 0); std::vector<Unit> NewCorpus; std::random_shuffle(Corpus.begin(), Corpus.end()); if (PreferSmall) std::stable_sort( Corpus.begin(), Corpus.end(), [](const Unit &A, const Unit &B) { return A.size() < B.size(); }); Unit &U = CurrentUnit; for (const auto &C : Corpus) { for (size_t First = 0; First < 1; First++) { U.clear(); size_t Last = std::min(First + Options.MaxLen, C.size()); U.insert(U.begin(), C.begin() + First, C.begin() + Last); size_t NewCoverage = RunOne(U); if (NewCoverage) { MaxCov = NewCoverage; NewCorpus.push_back(U); if (Options.Verbosity >= 2) Printf("NEW0: %zd L %zd\n", NewCoverage, U.size()); } } } Corpus = NewCorpus; for (auto &X : Corpus) UnitHashesAddedToCorpus.insert(Hash(X)); PrintStats("INITED", MaxCov); } size_t Fuzzer::RunOne(const Unit &U) { UnitStartTime = system_clock::now(); TotalNumberOfRuns++; size_t Res = 0; if (Options.UseFullCoverageSet) Res = RunOneMaximizeFullCoverageSet(U); else Res = RunOneMaximizeTotalCoverage(U); auto UnitStopTime = system_clock::now(); auto TimeOfUnit = duration_cast<seconds>(UnitStopTime - UnitStartTime).count(); if (TimeOfUnit > TimeOfLongestUnitInSeconds) { TimeOfLongestUnitInSeconds = TimeOfUnit; Printf("Longest unit: %zd s:\n", TimeOfLongestUnitInSeconds); Print(U, "\n"); } return Res; } void Fuzzer::RunOneAndUpdateCorpus(const Unit &U) { if (TotalNumberOfRuns >= Options.MaxNumberOfRuns) return; ReportNewCoverage(RunOne(U), U); } static uintptr_t HashOfArrayOfPCs(uintptr_t *PCs, uintptr_t NumPCs) { uintptr_t Res = 0; for (uintptr_t i = 0; i < NumPCs; i++) { Res = (Res + PCs[i]) * 7; } return Res; } Unit Fuzzer::SubstituteTokens(const Unit &U) const { Unit Res; for (auto Idx : U) { if (Idx < Options.Tokens.size()) { std::string Token = Options.Tokens[Idx]; Res.insert(Res.end(), Token.begin(), Token.end()); } else { Res.push_back(' '); } } // FIXME: Apply DFSan labels. return Res; } void Fuzzer::ExecuteCallback(const Unit &U) { if (Options.Tokens.empty()) { USF.TargetFunction(U.data(), U.size()); } else { auto T = SubstituteTokens(U); USF.TargetFunction(T.data(), T.size()); } } // Experimental. // Fuly reset the current coverage state, run a single unit, // compute a hash function from the full coverage set, // return non-zero if the hash value is new. 
// This produces tons of new units and as is it's only suitable for small tests, // e.g. test/FullCoverageSetTest.cpp. FIXME: make it scale. size_t Fuzzer::RunOneMaximizeFullCoverageSet(const Unit &U) { __sanitizer_reset_coverage(); ExecuteCallback(U); uintptr_t *PCs; uintptr_t NumPCs =__sanitizer_get_coverage_guards(&PCs); if (FullCoverageSets.insert(HashOfArrayOfPCs(PCs, NumPCs)).second) return FullCoverageSets.size(); return 0; } size_t Fuzzer::RunOneMaximizeTotalCoverage(const Unit &U) { size_t NumCounters = __sanitizer_get_number_of_counters(); if (Options.UseCounters) { CounterBitmap.resize(NumCounters); __sanitizer_update_counter_bitset_and_clear_counters(0); } size_t OldCoverage = __sanitizer_get_total_unique_coverage(); ExecuteCallback(U); size_t NewCoverage = __sanitizer_get_total_unique_coverage(); size_t NumNewBits = 0; if (Options.UseCounters) NumNewBits = __sanitizer_update_counter_bitset_and_clear_counters( CounterBitmap.data()); if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) && Options.Verbosity) PrintStats("pulse ", NewCoverage); if (NewCoverage > OldCoverage || NumNewBits) return NewCoverage; return 0; } void Fuzzer::WriteToOutputCorpus(const Unit &U) { if (Options.OutputCorpus.empty()) return; std::string Path = DirPlusFile(Options.OutputCorpus, Hash(U)); WriteToFile(U, Path); if (Options.Verbosity >= 2) Printf("Written to %s\n", Path.c_str()); } void Fuzzer::WriteToCrash(const Unit &U, const char *Prefix) { std::string Path = Prefix + Hash(U); WriteToFile(U, Path); Printf("CRASHED; file written to %s\nBase64: ", Path.c_str()); PrintFileAsBase64(Path); } void Fuzzer::SaveCorpus() { if (Options.OutputCorpus.empty()) return; for (const auto &U : Corpus) WriteToFile(U, DirPlusFile(Options.OutputCorpus, Hash(U))); if (Options.Verbosity) Printf("Written corpus of %zd files to %s\n", Corpus.size(), Options.OutputCorpus.c_str()); } void Fuzzer::ReportNewCoverage(size_t NewCoverage, const Unit &U) { if (!NewCoverage) return; Corpus.push_back(U); UnitHashesAddedToCorpus.insert(Hash(U)); PrintStats("NEW ", NewCoverage, ""); if (Options.Verbosity) { Printf(" L: %zd", U.size()); if (U.size() < 30) { Printf(" "); PrintUnitInASCIIOrTokens(U, "\t"); Print(U); } Printf("\n"); } WriteToOutputCorpus(U); if (Options.ExitOnFirst) exit(0); } void Fuzzer::MutateAndTestOne(Unit *U) { for (int i = 0; i < Options.MutateDepth; i++) { StartTraceRecording(); size_t Size = U->size(); U->resize(Options.MaxLen); size_t NewSize = USF.Mutate(U->data(), Size, U->size()); assert(NewSize > 0 && "Mutator returned empty unit"); assert(NewSize <= (size_t)Options.MaxLen && "Mutator return overisized unit"); U->resize(NewSize); RunOneAndUpdateCorpus(*U); size_t NumTraceBasedMutations = StopTraceRecording(); for (size_t j = 0; j < NumTraceBasedMutations; j++) { ApplyTraceBasedMutation(j, U); RunOneAndUpdateCorpus(*U); } } } void Fuzzer::Loop(size_t NumIterations) { for (size_t i = 1; i <= NumIterations; i++) { for (size_t J1 = 0; J1 < Corpus.size(); J1++) { SyncCorpus(); RereadOutputCorpus(); if (TotalNumberOfRuns >= Options.MaxNumberOfRuns) return; // First, simply mutate the unit w/o doing crosses. CurrentUnit = Corpus[J1]; MutateAndTestOne(&CurrentUnit); // Now, cross with others. 
if (Options.DoCrossOver && !Corpus[J1].empty()) { for (size_t J2 = 0; J2 < Corpus.size(); J2++) { CurrentUnit.resize(Options.MaxLen); size_t NewSize = USF.CrossOver( Corpus[J1].data(), Corpus[J1].size(), Corpus[J2].data(), Corpus[J2].size(), CurrentUnit.data(), CurrentUnit.size()); assert(NewSize > 0 && "CrossOver returned empty unit"); assert(NewSize <= (size_t)Options.MaxLen && "CrossOver return overisized unit"); CurrentUnit.resize(NewSize); MutateAndTestOne(&CurrentUnit); } } } } } void Fuzzer::SyncCorpus() { if (Options.SyncCommand.empty() || Options.OutputCorpus.empty()) return; auto Now = system_clock::now(); if (duration_cast<seconds>(Now - LastExternalSync).count() < Options.SyncTimeout) return; LastExternalSync = Now; ExecuteCommand(Options.SyncCommand + " " + Options.OutputCorpus); } } // namespace fuzzer
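An illustrative aside on the structure of the loop above: mutate a corpus unit, run it, ask how much coverage is now known, and keep the unit only if that number grew. The sketch below replaces the __sanitizer_* coverage queries with a toy in-process set so it can run standalone; the toy target and all names here are made up for illustration and are not part of libFuzzer:

// Illustrative only: skeleton of a coverage-feedback loop. Real libFuzzer uses
// __sanitizer_get_total_unique_coverage() instead of the toy Coverage set.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <set>
#include <vector>

static std::set<int> Coverage;  // Stand-in for the sanitizer's PC/edge set.

// Toy target: "covers" one feature per matching prefix byte of "FUZZ".
static void RunTarget(const std::vector<uint8_t> &U) {
  const char *Magic = "FUZZ";
  for (size_t i = 0; i < U.size() && i < 4 && U[i] == (uint8_t)Magic[i]; i++)
    Coverage.insert((int)i);
}

int main() {
  srand(1);
  std::vector<std::vector<uint8_t>> Corpus = {{'A'}};
  for (int Iter = 0; Iter < 100000; Iter++) {
    std::vector<uint8_t> U = Corpus[rand() % Corpus.size()];
    if (U.size() < 4 && rand() % 2) U.push_back(0);       // Grow occasionally,
    U[rand() % U.size()] = (uint8_t)(rand() % 256);       // then mutate one byte.
    size_t Before = Coverage.size();
    RunTarget(U);
    if (Coverage.size() > Before) {                       // New coverage: keep it.
      Corpus.push_back(U);
      printf("NEW cov %zu corpus %zu\n", Coverage.size(), Corpus.size());
    }
  }
}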
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerInternal.h
//===- FuzzerInternal.h - Internal header for the Fuzzer --------*- C++ -* ===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Define the main class fuzzer::Fuzzer and most functions. //===----------------------------------------------------------------------===// #include <cassert> #include <climits> #include <chrono> #include <cstddef> #include <cstdlib> #include <string> #include <vector> #include <unordered_set> #include "FuzzerInterface.h" namespace fuzzer { typedef std::vector<uint8_t> Unit; using namespace std::chrono; std::string FileToString(const std::string &Path); Unit FileToVector(const std::string &Path); void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V, long *Epoch); void WriteToFile(const Unit &U, const std::string &Path); void CopyFileToErr(const std::string &Path); // Returns "Dir/FileName" or equivalent for the current OS. std::string DirPlusFile(const std::string &DirPath, const std::string &FileName); size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize); size_t CrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2, size_t Size2, uint8_t *Out, size_t MaxOutSize); void Printf(const char *Fmt, ...); void Print(const Unit &U, const char *PrintAfter = ""); void PrintASCII(const Unit &U, const char *PrintAfter = ""); std::string Hash(const Unit &U); void SetTimer(int Seconds); void PrintFileAsBase64(const std::string &Path); void ExecuteCommand(const std::string &Command); // Private copy of SHA1 implementation. static const int kSHA1NumBytes = 20; // Computes SHA1 hash of 'Len' bytes in 'Data', writes kSHA1NumBytes to 'Out'. void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out); int NumberOfCpuCores(); class Fuzzer { public: struct FuzzingOptions { int Verbosity = 1; int MaxLen = 0; int UnitTimeoutSec = 300; bool DoCrossOver = true; int MutateDepth = 5; bool ExitOnFirst = false; bool UseCounters = false; bool UseTraces = false; bool UseFullCoverageSet = false; bool Reload = true; int PreferSmallDuringInitialShuffle = -1; size_t MaxNumberOfRuns = ULONG_MAX; int SyncTimeout = 600; std::string OutputCorpus; std::string SyncCommand; std::vector<std::string> Tokens; }; Fuzzer(UserSuppliedFuzzer &USF, FuzzingOptions Options); void AddToCorpus(const Unit &U) { Corpus.push_back(U); } void Loop(size_t NumIterations); void ShuffleAndMinimize(); void InitializeTraceState(); size_t CorpusSize() const { return Corpus.size(); } void ReadDir(const std::string &Path, long *Epoch) { ReadDirToVectorOfUnits(Path.c_str(), &Corpus, Epoch); } void RereadOutputCorpus(); // Save the current corpus to OutputCorpus. 
void SaveCorpus(); size_t secondsSinceProcessStartUp() { return duration_cast<seconds>(system_clock::now() - ProcessStartTime) .count(); } size_t getTotalNumberOfRuns() { return TotalNumberOfRuns; } static void StaticAlarmCallback(); Unit SubstituteTokens(const Unit &U) const; private: void AlarmCallback(); void ExecuteCallback(const Unit &U); void MutateAndTestOne(Unit *U); void ReportNewCoverage(size_t NewCoverage, const Unit &U); size_t RunOne(const Unit &U); void RunOneAndUpdateCorpus(const Unit &U); size_t RunOneMaximizeTotalCoverage(const Unit &U); size_t RunOneMaximizeFullCoverageSet(const Unit &U); size_t RunOneMaximizeCoveragePairs(const Unit &U); void WriteToOutputCorpus(const Unit &U); void WriteToCrash(const Unit &U, const char *Prefix); void PrintStats(const char *Where, size_t Cov, const char *End = "\n"); void PrintUnitInASCIIOrTokens(const Unit &U, const char *PrintAfter = ""); void SyncCorpus(); // Trace-based fuzzing: we run a unit with some kind of tracing // enabled and record potentially useful mutations. Then // We apply these mutations one by one to the unit and run it again. // Start tracing; forget all previously proposed mutations. void StartTraceRecording(); // Stop tracing and return the number of proposed mutations. size_t StopTraceRecording(); // Apply Idx-th trace-based mutation to U. void ApplyTraceBasedMutation(size_t Idx, Unit *U); void SetDeathCallback(); static void StaticDeathCallback(); void DeathCallback(); Unit CurrentUnit; size_t TotalNumberOfRuns = 0; std::vector<Unit> Corpus; std::unordered_set<std::string> UnitHashesAddedToCorpus; std::unordered_set<uintptr_t> FullCoverageSets; // For UseCounters std::vector<uint8_t> CounterBitmap; size_t TotalBits() { // Slow. Call it only for printing stats. size_t Res = 0; for (auto x : CounterBitmap) Res += __builtin_popcount(x); return Res; } UserSuppliedFuzzer &USF; FuzzingOptions Options; system_clock::time_point ProcessStartTime = system_clock::now(); system_clock::time_point LastExternalSync = system_clock::now(); system_clock::time_point UnitStartTime; long TimeOfLongestUnitInSeconds = 0; long EpochOfLastReadOfOutputCorpus = 0; }; class SimpleUserSuppliedFuzzer: public UserSuppliedFuzzer { public: SimpleUserSuppliedFuzzer(UserCallback Callback) : Callback(Callback) {} virtual void TargetFunction(const uint8_t *Data, size_t Size) { return Callback(Data, Size); } private: UserCallback Callback; }; }; // namespace fuzzer
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerUtil.cpp
//===- FuzzerUtil.cpp - Misc utils ----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Misc utils.
//===----------------------------------------------------------------------===//

#include "FuzzerInternal.h"
#include <sstream>
#include <iomanip>
#include <sys/time.h>
#include <cassert>
#include <cstring>
#include <signal.h>
#include <unistd.h>

namespace fuzzer {

void Print(const Unit &v, const char *PrintAfter) {
  for (auto x : v)
    Printf("0x%x,", (unsigned) x);
  Printf("%s", PrintAfter);
}

void PrintASCII(const Unit &U, const char *PrintAfter) {
  for (auto X : U) {
    if (isprint(X))
      Printf("%c", X);
    else
      Printf("\\x%x", (unsigned)X);
  }
  Printf("%s", PrintAfter);
}

std::string Hash(const Unit &U) {
  uint8_t Hash[kSHA1NumBytes];
  ComputeSHA1(U.data(), U.size(), Hash);
  std::stringstream SS;
  for (int i = 0; i < kSHA1NumBytes; i++)
    SS << std::hex << std::setfill('0') << std::setw(2) << (unsigned)Hash[i];
  return SS.str();
}

static void AlarmHandler(int, siginfo_t *, void *) {
  Fuzzer::StaticAlarmCallback();
}

void SetTimer(int Seconds) {
  struct itimerval T {{Seconds, 0}, {Seconds, 0}};
  Printf("SetTimer %d\n", Seconds);
  int Res = setitimer(ITIMER_REAL, &T, nullptr);
  assert(Res == 0);
  struct sigaction sigact;
  memset(&sigact, 0, sizeof(sigact));
  sigact.sa_sigaction = AlarmHandler;
  Res = sigaction(SIGALRM, &sigact, 0);
  assert(Res == 0);
}

int NumberOfCpuCores() {
  FILE *F = popen("nproc", "r");
  int N = 0;
  fscanf(F, "%d", &N);
  pclose(F);  // A stream opened with popen() must be closed with pclose().
  return N;
}

void ExecuteCommand(const std::string &Command) {
  system(Command.c_str());
}

}  // namespace fuzzer
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/pull_and_push_fuzz_corpus.sh
#!/bin/bash
# A simple script to synchronise a fuzz test corpus
# with an external git repository.
# Usage:
#   pull_and_push_fuzz_corpus.sh DIR
# It assumes that DIR is inside a git repo and push
# can be done w/o typing a password.
cd $1
git add *
git commit -m "fuzz test corpus"
git pull --rebase --no-edit
for((attempt=0; attempt<5; attempt++)); do
  echo GIT PUSH $1 ATTEMPT $attempt
  if $(git push); then break; fi
  git pull --rebase --no-edit
done
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/CMakeLists.txt
set(LIBFUZZER_FLAGS_BASE "${CMAKE_CXX_FLAGS_RELEASE}")
# Disable the coverage and sanitizer instrumentation for the fuzzer itself.
set(CMAKE_CXX_FLAGS_RELEASE "${LIBFUZZER_FLAGS_BASE} -O2 -fno-sanitize=all")
if( LLVM_USE_SANITIZE_COVERAGE )
  add_library(LLVMFuzzerNoMainObjects OBJECT
    FuzzerCrossOver.cpp
    FuzzerInterface.cpp
    FuzzerTraceState.cpp
    FuzzerDriver.cpp
    FuzzerIO.cpp
    FuzzerLoop.cpp
    FuzzerMutate.cpp
    FuzzerSanitizerOptions.cpp
    FuzzerSHA1.cpp
    FuzzerUtil.cpp
    )
  add_library(LLVMFuzzerNoMain STATIC
    $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
    )
  add_library(LLVMFuzzer STATIC
    FuzzerMain.cpp
    $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
    )

  if( LLVM_INCLUDE_TESTS )
    add_subdirectory(test)
  endif()
endif()
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerTraceState.cpp
//===- FuzzerTraceState.cpp - Trace-based fuzzer mutator ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // This file implements a mutation algorithm based on instruction traces and // on taint analysis feedback from DFSan. // // Instruction traces are special hooks inserted by the compiler around // interesting instructions. Currently supported traces: // * __sanitizer_cov_trace_cmp -- inserted before every ICMP instruction, // receives the type, size and arguments of ICMP. // // Every time a traced event is intercepted we analyse the data involved // in the event and suggest a mutation for future executions. // For example if 4 bytes of data that derive from input bytes {4,5,6,7} // are compared with a constant 12345, // we try to insert 12345, 12344, 12346 into bytes // {4,5,6,7} of the next fuzzed inputs. // // The fuzzer can work only with the traces, or with both traces and DFSan. // // DataFlowSanitizer (DFSan) is a tool for // generalised dynamic data flow (taint) analysis: // http://clang.llvm.org/docs/DataFlowSanitizer.html . // // The approach with DFSan-based fuzzing has some similarity to // "Taint-based Directed Whitebox Fuzzing" // by Vijay Ganesh & Tim Leek & Martin Rinard: // http://dspace.mit.edu/openaccess-disseminate/1721.1/59320, // but it uses a full blown LLVM IR taint analysis and separate instrumentation // to analyze all of the "attack points" at once. // // Workflow with DFSan: // * lib/Fuzzer/Fuzzer*.cpp is compiled w/o any instrumentation. // * The code under test is compiled with DFSan *and* with instruction traces. // * Every call to HOOK(a,b) is replaced by DFSan with // __dfsw_HOOK(a, b, label(a), label(b)) so that __dfsw_HOOK // gets all the taint labels for the arguments. // * At the Fuzzer startup we assign a unique DFSan label // to every byte of the input string (Fuzzer::CurrentUnit) so that for any // chunk of data we know which input bytes it has derived from. // * The __dfsw_* functions (implemented in this file) record the // parameters (i.e. the application data and the corresponding taint labels) // in a global state. // * Fuzzer::ApplyTraceBasedMutation() tries to use the data recorded // by __dfsw_* hooks to guide the fuzzing towards new application states. // // Parts of this code will not function when DFSan is not linked in. // Instead of using ifdefs and thus requiring a separate build of lib/Fuzzer // we redeclare the dfsan_* interface functions as weak and check if they // are nullptr before calling. // If this approach proves to be useful we may add attribute(weak) to the // dfsan declarations in dfsan_interface.h // // This module is in the "proof of concept" stage. // It is capable of solving only the simplest puzzles // like test/dfsan/DFSanSimpleCmpTest.cpp. 
//===----------------------------------------------------------------------===// /* Example of manual usage (-fsanitize=dataflow is optional): ( cd $LLVM/lib/Fuzzer/ clang -fPIC -c -g -O2 -std=c++11 Fuzzer*.cpp clang++ -O0 -std=c++11 -fsanitize-coverage=edge,trace-cmp \ -fsanitize=dataflow \ test/dfsan/DFSanSimpleCmpTest.cpp Fuzzer*.o ./a.out ) */ #include "FuzzerInternal.h" #include <sanitizer/dfsan_interface.h> #include <algorithm> #include <cstring> #include <unordered_map> extern "C" { __attribute__((weak)) dfsan_label dfsan_create_label(const char *desc, void *userdata); __attribute__((weak)) void dfsan_set_label(dfsan_label label, void *addr, size_t size); __attribute__((weak)) void dfsan_add_label(dfsan_label label, void *addr, size_t size); __attribute__((weak)) const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label); __attribute__((weak)) dfsan_label dfsan_read_label(const void *addr, size_t size); } // extern "C" namespace fuzzer { static bool ReallyHaveDFSan() { return &dfsan_create_label != nullptr; } // These values are copied from include/llvm/IR/InstrTypes.h. // We do not include the LLVM headers here to remain independent. // If these values ever change, an assertion in ComputeCmp will fail. enum Predicate { ICMP_EQ = 32, ///< equal ICMP_NE = 33, ///< not equal ICMP_UGT = 34, ///< unsigned greater than ICMP_UGE = 35, ///< unsigned greater or equal ICMP_ULT = 36, ///< unsigned less than ICMP_ULE = 37, ///< unsigned less or equal ICMP_SGT = 38, ///< signed greater than ICMP_SGE = 39, ///< signed greater or equal ICMP_SLT = 40, ///< signed less than ICMP_SLE = 41, ///< signed less or equal }; template <class U, class S> bool ComputeCmp(size_t CmpType, U Arg1, U Arg2) { switch(CmpType) { case ICMP_EQ : return Arg1 == Arg2; case ICMP_NE : return Arg1 != Arg2; case ICMP_UGT: return Arg1 > Arg2; case ICMP_UGE: return Arg1 >= Arg2; case ICMP_ULT: return Arg1 < Arg2; case ICMP_ULE: return Arg1 <= Arg2; case ICMP_SGT: return (S)Arg1 > (S)Arg2; case ICMP_SGE: return (S)Arg1 >= (S)Arg2; case ICMP_SLT: return (S)Arg1 < (S)Arg2; case ICMP_SLE: return (S)Arg1 <= (S)Arg2; default: assert(0 && "unsupported CmpType"); } return false; } static bool ComputeCmp(size_t CmpSize, size_t CmpType, uint64_t Arg1, uint64_t Arg2) { if (CmpSize == 8) return ComputeCmp<uint64_t, int64_t>(CmpType, Arg1, Arg2); if (CmpSize == 4) return ComputeCmp<uint32_t, int32_t>(CmpType, Arg1, Arg2); if (CmpSize == 2) return ComputeCmp<uint16_t, int16_t>(CmpType, Arg1, Arg2); if (CmpSize == 1) return ComputeCmp<uint8_t, int8_t>(CmpType, Arg1, Arg2); assert(0 && "unsupported type size"); return true; } // As a simplification we use the range of input bytes instead of a set of input // bytes. struct LabelRange { uint16_t Beg, End; // Range is [Beg, End), thus Beg==End is an empty range. LabelRange(uint16_t Beg = 0, uint16_t End = 0) : Beg(Beg), End(End) {} static LabelRange Join(LabelRange LR1, LabelRange LR2) { if (LR1.Beg == LR1.End) return LR2; if (LR2.Beg == LR2.End) return LR1; return {std::min(LR1.Beg, LR2.Beg), std::max(LR1.End, LR2.End)}; } LabelRange &Join(LabelRange LR) { return *this = Join(*this, LR); } static LabelRange Singleton(const dfsan_label_info *LI) { uint16_t Idx = (uint16_t)(uintptr_t)LI->userdata; assert(Idx > 0); return {(uint16_t)(Idx - 1), Idx}; } }; // For now, very simple: put Size bytes of Data at position Pos. 
struct TraceBasedMutation { size_t Pos; size_t Size; uint64_t Data; }; class TraceState { public: TraceState(const Fuzzer::FuzzingOptions &Options, const Unit &CurrentUnit) : Options(Options), CurrentUnit(CurrentUnit) {} LabelRange GetLabelRange(dfsan_label L); void DFSanCmpCallback(uintptr_t PC, size_t CmpSize, size_t CmpType, uint64_t Arg1, uint64_t Arg2, dfsan_label L1, dfsan_label L2); void TraceCmpCallback(size_t CmpSize, size_t CmpType, uint64_t Arg1, uint64_t Arg2); int TryToAddDesiredData(uint64_t PresentData, uint64_t DesiredData, size_t DataSize); void StartTraceRecording() { if (!Options.UseTraces) return; RecordingTraces = true; Mutations.clear(); } size_t StopTraceRecording() { RecordingTraces = false; std::random_shuffle(Mutations.begin(), Mutations.end()); return Mutations.size(); } void ApplyTraceBasedMutation(size_t Idx, fuzzer::Unit *U); private: bool IsTwoByteData(uint64_t Data) { int64_t Signed = static_cast<int64_t>(Data); Signed >>= 16; return Signed == 0 || Signed == -1L; } bool RecordingTraces = false; std::vector<TraceBasedMutation> Mutations; LabelRange LabelRanges[1 << (sizeof(dfsan_label) * 8)] = {}; const Fuzzer::FuzzingOptions &Options; const Unit &CurrentUnit; }; LabelRange TraceState::GetLabelRange(dfsan_label L) { LabelRange &LR = LabelRanges[L]; if (LR.Beg < LR.End || L == 0) return LR; const dfsan_label_info *LI = dfsan_get_label_info(L); if (LI->l1 || LI->l2) return LR = LabelRange::Join(GetLabelRange(LI->l1), GetLabelRange(LI->l2)); return LR = LabelRange::Singleton(LI); } void TraceState::ApplyTraceBasedMutation(size_t Idx, fuzzer::Unit *U) { assert(Idx < Mutations.size()); auto &M = Mutations[Idx]; if (Options.Verbosity >= 3) Printf("TBM %zd %zd %zd\n", M.Pos, M.Size, M.Data); if (M.Pos + M.Size > U->size()) return; memcpy(U->data() + M.Pos, &M.Data, M.Size); } void TraceState::DFSanCmpCallback(uintptr_t PC, size_t CmpSize, size_t CmpType, uint64_t Arg1, uint64_t Arg2, dfsan_label L1, dfsan_label L2) { assert(ReallyHaveDFSan()); if (!RecordingTraces) return; if (L1 == 0 && L2 == 0) return; // Not actionable. if (L1 != 0 && L2 != 0) return; // Probably still actionable. bool Res = ComputeCmp(CmpSize, CmpType, Arg1, Arg2); uint64_t Data = L1 ? Arg2 : Arg1; LabelRange LR = L1 ? 
GetLabelRange(L1) : GetLabelRange(L2); for (size_t Pos = LR.Beg; Pos + CmpSize <= LR.End; Pos++) { Mutations.push_back({Pos, CmpSize, Data}); Mutations.push_back({Pos, CmpSize, Data + 1}); Mutations.push_back({Pos, CmpSize, Data - 1}); } if (CmpSize > LR.End - LR.Beg) Mutations.push_back({LR.Beg, (unsigned)(LR.End - LR.Beg), Data}); if (Options.Verbosity >= 3) Printf("DFSAN: PC %lx S %zd T %zd A1 %llx A2 %llx R %d L1 %d L2 %d MU %zd\n", PC, CmpSize, CmpType, Arg1, Arg2, Res, L1, L2, Mutations.size()); } int TraceState::TryToAddDesiredData(uint64_t PresentData, uint64_t DesiredData, size_t DataSize) { int Res = 0; const uint8_t *Beg = CurrentUnit.data(); const uint8_t *End = Beg + CurrentUnit.size(); for (const uint8_t *Cur = Beg; Cur < End; Cur += DataSize) { Cur = (uint8_t *)memmem(Cur, End - Cur, &PresentData, DataSize); if (!Cur) break; size_t Pos = Cur - Beg; assert(Pos < CurrentUnit.size()); Mutations.push_back({Pos, DataSize, DesiredData}); Mutations.push_back({Pos, DataSize, DesiredData + 1}); Mutations.push_back({Pos, DataSize, DesiredData - 1}); Cur += DataSize; Res++; } return Res; } void TraceState::TraceCmpCallback(size_t CmpSize, size_t CmpType, uint64_t Arg1, uint64_t Arg2) { if (!RecordingTraces) return; int Added = 0; if (Options.Verbosity >= 3) Printf("TraceCmp: %zd %zd\n", Arg1, Arg2); Added += TryToAddDesiredData(Arg1, Arg2, CmpSize); Added += TryToAddDesiredData(Arg2, Arg1, CmpSize); if (!Added && CmpSize == 4 && IsTwoByteData(Arg1) && IsTwoByteData(Arg2)) { Added += TryToAddDesiredData(Arg1, Arg2, 2); Added += TryToAddDesiredData(Arg2, Arg1, 2); } } static TraceState *TS; void Fuzzer::StartTraceRecording() { if (!TS) return; TS->StartTraceRecording(); } size_t Fuzzer::StopTraceRecording() { if (!TS) return 0; return TS->StopTraceRecording(); } void Fuzzer::ApplyTraceBasedMutation(size_t Idx, Unit *U) { assert(TS); TS->ApplyTraceBasedMutation(Idx, U); } void Fuzzer::InitializeTraceState() { if (!Options.UseTraces) return; TS = new TraceState(Options, CurrentUnit); CurrentUnit.resize(Options.MaxLen); // The rest really requires DFSan. if (!ReallyHaveDFSan()) return; for (size_t i = 0; i < static_cast<size_t>(Options.MaxLen); i++) { dfsan_label L = dfsan_create_label("input", (void*)(i + 1)); // We assume that no one else has called dfsan_create_label before. assert(L == i + 1); dfsan_set_label(L, &CurrentUnit[i], 1); } } } // namespace fuzzer using fuzzer::TS; extern "C" { void __dfsw___sanitizer_cov_trace_cmp(uint64_t SizeAndType, uint64_t Arg1, uint64_t Arg2, dfsan_label L0, dfsan_label L1, dfsan_label L2) { if (!TS) return; assert(L0 == 0); uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0)); uint64_t CmpSize = (SizeAndType >> 32) / 8; uint64_t Type = (SizeAndType << 32) >> 32; TS->DFSanCmpCallback(PC, CmpSize, Type, Arg1, Arg2, L1, L2); } void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2, size_t n, dfsan_label s1_label, dfsan_label s2_label, dfsan_label n_label) { if (!TS) return; uintptr_t PC = reinterpret_cast<uintptr_t>(caller_pc); uint64_t S1 = 0, S2 = 0; // Simplification: handle only first 8 bytes. 
memcpy(&S1, s1, std::min(n, sizeof(S1))); memcpy(&S2, s2, std::min(n, sizeof(S2))); dfsan_label L1 = dfsan_read_label(s1, n); dfsan_label L2 = dfsan_read_label(s2, n); TS->DFSanCmpCallback(PC, n, fuzzer::ICMP_EQ, S1, S2, L1, L2); } void __sanitizer_cov_trace_cmp(uint64_t SizeAndType, uint64_t Arg1, uint64_t Arg2) { if (!TS) return; uint64_t CmpSize = (SizeAndType >> 32) / 8; uint64_t Type = (SizeAndType << 32) >> 32; TS->TraceCmpCallback(CmpSize, Type, Arg1, Arg2); } } // extern "C"
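An illustrative aside on the idea described in the header comment above: when a traced comparison shows that input bytes {4,5,6,7} were compared against 12345, the fuzzer proposes writing 12344, 12345 and 12346 into those positions on later runs. A minimal standalone sketch of applying such a proposal, mirroring TraceState::ApplyTraceBasedMutation; the struct name and values here are illustrative:

// Illustrative only: record a "desired constant at position" mutation and
// apply it to a unit, the way TraceState::ApplyTraceBasedMutation does.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct ProposedMutation { size_t Pos; size_t Size; uint64_t Data; };

static void Apply(const ProposedMutation &M, std::vector<uint8_t> *U) {
  if (M.Pos + M.Size > U->size()) return;       // Out of range: skip.
  memcpy(U->data() + M.Pos, &M.Data, M.Size);   // Copy the low M.Size bytes.
}

int main() {
  std::vector<uint8_t> Unit(16, 0);
  // Suppose a trace showed a 4-byte compare of input bytes {4..7} with 12345.
  for (uint64_t V : {12344u, 12345u, 12346u}) {
    std::vector<uint8_t> Candidate = Unit;
    Apply({4, 4, V}, &Candidate);
    printf("candidate with %llu at offset 4:", (unsigned long long)V);
    for (uint8_t B : Candidate) printf(" %02x", (unsigned)B);
    printf("\n");
  }
}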
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerInterface.cpp
//===- FuzzerInterface.cpp - Mutate a test input --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Parts of public interface for libFuzzer.
//===----------------------------------------------------------------------===//

#include "FuzzerInterface.h"
#include "FuzzerInternal.h"

namespace fuzzer {

size_t UserSuppliedFuzzer::BasicMutate(uint8_t *Data, size_t Size,
                                       size_t MaxSize) {
  return ::fuzzer::Mutate(Data, Size, MaxSize);
}

size_t UserSuppliedFuzzer::BasicCrossOver(const uint8_t *Data1, size_t Size1,
                                          const uint8_t *Data2, size_t Size2,
                                          uint8_t *Out, size_t MaxOutSize) {
  return ::fuzzer::CrossOver(Data1, Size1, Data2, Size2, Out, MaxOutSize);
}

}  // namespace fuzzer.
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerFlags.def
//===- FuzzerFlags.def - Run-time flags -------------------------*- C++ -* ===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Flags. FUZZER_FLAG_INT/FUZZER_FLAG_STRING macros should be defined at the
// point of inclusion. We are not using any flag parsing library for better
// portability and independence.
//===----------------------------------------------------------------------===//
FUZZER_FLAG_INT(verbosity, 1, "Verbosity level.")
FUZZER_FLAG_INT(seed, 0, "Random seed. If 0, seed is generated.")
FUZZER_FLAG_INT(iterations, -1,
                "Number of iterations of the fuzzer internal loop"
                " (-1 for infinite iterations).")
FUZZER_FLAG_INT(runs, -1,
                "Number of individual test runs (-1 for infinite runs).")
FUZZER_FLAG_INT(max_len, 64, "Maximum length of the test input.")
FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.")
FUZZER_FLAG_INT(mutate_depth, 5,
                "Apply this number of consecutive mutations to each input.")
FUZZER_FLAG_INT(
    prefer_small_during_initial_shuffle, -1,
    "If 1, always prefer smaller inputs during the initial corpus shuffle."
    " If 0, never do that. If -1, do it sometimes.")
FUZZER_FLAG_INT(exit_on_first, 0,
                "If 1, exit after the first new interesting input is found.")
FUZZER_FLAG_INT(
    timeout, 1200,
    "Timeout in seconds (if positive). "
    "If one unit runs more than this number of seconds the process will abort.")
FUZZER_FLAG_INT(help, 0, "Print help.")
FUZZER_FLAG_INT(
    save_minimized_corpus, 0,
    "If 1, the minimized corpus is saved into the first input directory")
FUZZER_FLAG_INT(use_counters, 1, "Use coverage counters")
FUZZER_FLAG_INT(use_traces, 0, "Experimental: use instruction traces")
FUZZER_FLAG_INT(
    use_full_coverage_set, 0,
    "Experimental: Maximize the number of different full"
    " coverage sets as opposed to maximizing the total coverage."
    " This is potentially MUCH slower, but may discover more paths.")
FUZZER_FLAG_INT(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
                         " this number of jobs in separate worker processes"
                         " with stdout/stderr redirected to fuzz-JOB.log.")
FUZZER_FLAG_INT(
    workers, 0,
    "Number of simultaneous worker processes to run the jobs."
    " If zero, \"min(jobs,NumberOfCpuCores()/2)\" is used.")
FUZZER_FLAG_INT(reload, 1,
                "Reload the main corpus periodically to get new units"
                " discovered by other processes.")
FUZZER_FLAG_STRING(tokens, "Use the file with tokens (one token per line) to"
                           " fuzz a token based input language.")
FUZZER_FLAG_STRING(apply_tokens, "Read the given input file, substitute bytes "
                                 " with tokens and write the result to stdout.")
FUZZER_FLAG_STRING(sync_command, "Execute an external command "
                                 "\"<sync_command> <test_corpus>\" "
                                 "to synchronize the test corpus.")
FUZZER_FLAG_INT(sync_timeout, 600, "Minimum timeout between syncs.")
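An illustrative aside: this .def file is consumed with the X-macro pattern. FuzzerDriver.cpp includes it twice under different definitions of FUZZER_FLAG_INT/FUZZER_FLAG_STRING, once to declare the fields of the Flags struct and once to build the FlagDescriptions table. A self-contained miniature of that expansion, with two hard-coded sample flags standing in for the real '#include "FuzzerFlags.def"':

// Illustrative only: the X-macro expansion used by FuzzerDriver.cpp, shrunk to
// two sample flags so the sketch compiles on its own.
#include <cstdio>

// Stand-in for the contents of FuzzerFlags.def:
#define SAMPLE_FLAG_LINES                              \
  FUZZER_FLAG_INT(verbosity, 1, "Verbosity level.")    \
  FUZZER_FLAG_STRING(tokens, "Path to a token file.")

// Expansion 1: one struct field per flag.
struct {
#define FUZZER_FLAG_INT(Name, Default, Description) int Name;
#define FUZZER_FLAG_STRING(Name, Description) const char *Name;
  SAMPLE_FLAG_LINES
#undef FUZZER_FLAG_INT
#undef FUZZER_FLAG_STRING
} Flags;

// Expansion 2: one description row per flag.
struct FlagDescription { const char *Name; const char *Description; int Default; };
static FlagDescription FlagDescriptions[] = {
#define FUZZER_FLAG_INT(Name, Default, Description) {#Name, Description, Default},
#define FUZZER_FLAG_STRING(Name, Description) {#Name, Description, 0},
    SAMPLE_FLAG_LINES
#undef FUZZER_FLAG_INT
#undef FUZZER_FLAG_STRING
};

int main() {
  for (const auto &D : FlagDescriptions)
    printf("-%s\t(default %d)\t%s\n", D.Name, D.Default, D.Description);
}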
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/FuzzerCrossOver.cpp
//===- FuzzerCrossOver.cpp - Cross over two test inputs -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Cross over test inputs.
//===----------------------------------------------------------------------===//

#include <cstring>
#include "FuzzerInternal.h"

namespace fuzzer {

// Cross Data1 and Data2, store the result (up to MaxOutSize bytes) in Out.
size_t CrossOver(const uint8_t *Data1, size_t Size1,
                 const uint8_t *Data2, size_t Size2,
                 uint8_t *Out, size_t MaxOutSize) {
  assert(Size1 || Size2);
  MaxOutSize = rand() % MaxOutSize + 1;
  size_t OutPos = 0;
  size_t Pos1 = 0;
  size_t Pos2 = 0;
  size_t *InPos = &Pos1;
  size_t InSize = Size1;
  const uint8_t *Data = Data1;
  bool CurrentlyUsingFirstData = true;
  while (OutPos < MaxOutSize && (Pos1 < Size1 || Pos2 < Size2)) {
    // Merge a part of Data into Out.
    size_t OutSizeLeft = MaxOutSize - OutPos;
    if (*InPos < InSize) {
      size_t InSizeLeft = InSize - *InPos;
      size_t MaxExtraSize = std::min(OutSizeLeft, InSizeLeft);
      size_t ExtraSize = rand() % MaxExtraSize + 1;
      memcpy(Out + OutPos, Data + *InPos, ExtraSize);
      OutPos += ExtraSize;
      (*InPos) += ExtraSize;
    }
    // Use the other input data on the next iteration.
    InPos = CurrentlyUsingFirstData ? &Pos2 : &Pos1;
    InSize = CurrentlyUsingFirstData ? Size2 : Size1;
    Data = CurrentlyUsingFirstData ? Data2 : Data1;
    CurrentlyUsingFirstData = !CurrentlyUsingFirstData;
  }
  return OutPos;
}

}  // namespace fuzzer
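An illustrative aside: CrossOver() above copies alternating random-length chunks from the two parents into Out, so bytes keep their relative order within each parent; the unit test later in this listing enumerates the interleavings of {0,1,2} and {5,6,7} it can produce. A minimal usage sketch, assuming the file above is linked in:

// Illustrative only: cross {0,1,2} with {5,6,7} a few times and print results.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

namespace fuzzer {
size_t CrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2,
                 size_t Size2, uint8_t *Out, size_t MaxOutSize);
}

int main() {
  srand(1);  // CrossOver uses rand() for chunk sizes and the output length.
  std::vector<uint8_t> A = {0, 1, 2}, B = {5, 6, 7}, Out(6);
  for (int i = 0; i < 5; i++) {
    size_t N = fuzzer::CrossOver(A.data(), A.size(), B.data(), B.size(),
                                 Out.data(), Out.size());
    printf("result %d (len %zu):", i, N);
    for (size_t j = 0; j < N; j++) printf(" %d", Out[j]);
    printf("\n");
  }
}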
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Fuzzer/cxx_fuzzer_tokens.txt
# ## ` ~ ! @ $ % ^ & * ( ) _ - _ = + { } [ ] | \ , . / ? > < ; : ' " ++ -- << >> += -= *= /= >>= <<= &= |= ^= %= != && || == >= <= -> 0 1 2 3 4 5 6 7 8 9 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z a b c d e f g h i j k l m n o p q r s t u v w x y z alignas alignof and and_eq asm auto bitand bitor bool break case catch char char16_t char32_t class compl concept const constexpr const_cast continue decltype default delete do double dynamic_cast else enum explicit export extern false float for friend goto if inline int long mutable namespace new noexcept not not_eq nullptr operator or or_eq private protected public register reinterpret_cast requires return short signed sizeof static static_assert static_cast struct switch template this thread_local throw true try typedef typeid typename union unsigned using virtual void volatile wchar_t while xor xor_eq if elif else endif defined ifdef ifndef define undef include line error pragma override final
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/FuzzerUnittest.cpp
#include "FuzzerInternal.h" #include "gtest/gtest.h" #include <set> // For now, have LLVMFuzzerTestOneInput just to make it link. // Later we may want to make unittests that actually call LLVMFuzzerTestOneInput. extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { abort(); } TEST(Fuzzer, CrossOver) { using namespace fuzzer; Unit A({0, 1, 2}), B({5, 6, 7}); Unit C; Unit Expected[] = { { 0 }, { 0, 1 }, { 0, 5 }, { 0, 1, 2 }, { 0, 1, 5 }, { 0, 5, 1 }, { 0, 5, 6 }, { 0, 1, 2, 5 }, { 0, 1, 5, 2 }, { 0, 1, 5, 6 }, { 0, 5, 1, 2 }, { 0, 5, 1, 6 }, { 0, 5, 6, 1 }, { 0, 5, 6, 7 }, { 0, 1, 2, 5, 6 }, { 0, 1, 5, 2, 6 }, { 0, 1, 5, 6, 2 }, { 0, 1, 5, 6, 7 }, { 0, 5, 1, 2, 6 }, { 0, 5, 1, 6, 2 }, { 0, 5, 1, 6, 7 }, { 0, 5, 6, 1, 2 }, { 0, 5, 6, 1, 7 }, { 0, 5, 6, 7, 1 }, { 0, 1, 2, 5, 6, 7 }, { 0, 1, 5, 2, 6, 7 }, { 0, 1, 5, 6, 2, 7 }, { 0, 1, 5, 6, 7, 2 }, { 0, 5, 1, 2, 6, 7 }, { 0, 5, 1, 6, 2, 7 }, { 0, 5, 1, 6, 7, 2 }, { 0, 5, 6, 1, 2, 7 }, { 0, 5, 6, 1, 7, 2 }, { 0, 5, 6, 7, 1, 2 } }; for (size_t Len = 1; Len < 8; Len++) { std::set<Unit> FoundUnits, ExpectedUnitsWitThisLength; for (int Iter = 0; Iter < 3000; Iter++) { C.resize(Len); size_t NewSize = CrossOver(A.data(), A.size(), B.data(), B.size(), C.data(), C.size()); C.resize(NewSize); FoundUnits.insert(C); } for (const Unit &U : Expected) if (U.size() <= Len) ExpectedUnitsWitThisLength.insert(U); EXPECT_EQ(ExpectedUnitsWitThisLength, FoundUnits); } } TEST(Fuzzer, Hash) { uint8_t A[] = {'a', 'b', 'c'}; fuzzer::Unit U(A, A + sizeof(A)); EXPECT_EQ("a9993e364706816aba3e25717850c26c9cd0d89d", fuzzer::Hash(U)); U.push_back('d'); EXPECT_EQ("81fe8bfe87576c3ecb22426f8e57847382917acf", fuzzer::Hash(U)); }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/UserSuppliedFuzzerTest.cpp
// Simple test for a fuzzer.
// The fuzzer must find the string "Hi!" preceded by a magic value.
// Uses UserSuppliedFuzzer which ensures that the magic is present.
#include <cstdint>
#include <cassert>
#include <cstdlib>
#include <cstddef>
#include <cstring>
#include <iostream>

#include "FuzzerInterface.h"

static const uint64_t kMagic = 8860221463604ULL;

class MyFuzzer : public fuzzer::UserSuppliedFuzzer {
 public:
  void TargetFunction(const uint8_t *Data, size_t Size) {
    if (Size <= 10) return;
    if (memcmp(Data, &kMagic, sizeof(kMagic))) return;
    // It's hard to get here w/o advanced fuzzing techniques (e.g. cmp tracing).
    // So, we simply 'fix' the data in the custom mutator.
    if (Data[8] == 'H') {
      if (Data[9] == 'i') {
        if (Data[10] == '!') {
          std::cout << "BINGO; Found the target, exiting\n";
          exit(1);
        }
      }
    }
  }
  // Custom mutator.
  virtual size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
    assert(MaxSize > sizeof(kMagic));
    if (Size < sizeof(kMagic))
      Size = sizeof(kMagic);
    // "Fix" the data, then mutate.
    memcpy(Data, &kMagic, std::min(MaxSize, sizeof(kMagic)));
    return BasicMutate(Data + sizeof(kMagic), Size - sizeof(kMagic),
                       MaxSize - sizeof(kMagic));
  }
  // No need to redefine CrossOver() here.
};

int main(int argc, char **argv) {
  MyFuzzer F;
  fuzzer::FuzzerDriver(argc, argv, F);
}
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/TimeoutTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "Hi!". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> static volatile int Sink; extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { if (Size > 0 && Data[0] == 'H') { Sink = 1; if (Size > 1 && Data[1] == 'i') { Sink = 2; if (Size > 2 && Data[2] == '!') { Sink = 2; while (Sink) ; } } } }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "FUZZ". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { int bits = 0; if (Size > 0 && Data[0] == 'F') bits |= 1; if (Size > 1 && Data[1] == 'U') bits |= 2; if (Size > 2 && Data[2] == 'Z') bits |= 4; if (Size > 3 && Data[3] == 'Z') bits |= 8; if (bits == 15) { std::cerr << "BINGO!\n"; exit(1); } }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/lit.site.cfg.in
config.test_exec_root = "@CMAKE_CURRENT_BINARY_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/CounterTest.cpp
// Test for a fuzzer: must find the case where a particular basic block is
// executed many times.
#include <cstdint>
#include <cstdlib>
#include <cstddef>
#include <iostream>

extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  int Num = 0;
  for (size_t i = 0; i < Size; i++)
    if (Data[i] == 'A' + i)
      Num++;
  if (Num >= 4) {
    std::cerr << "BINGO!\n";
    exit(1);
  }
}
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/CMakeLists.txt
# Build all these tests with -O0, otherwise optimizations may merge some
# basic blocks and we'll fail to discover the targets.
# Also enable the coverage instrumentation back (it is disabled
# for the Fuzzer lib)
set(CMAKE_CXX_FLAGS_RELEASE "${LIBFUZZER_FLAGS_BASE} -O0 -fsanitize-coverage=edge,indirect-calls")

set(DFSanTests
  DFSanMemcmpTest
  DFSanSimpleCmpTest
  )

set(Tests
  CounterTest
  CxxTokensTest
  FourIndependentBranchesTest
  FullCoverageSetTest
  InfiniteTest
  NullDerefTest
  SimpleTest
  TimeoutTest
  ${DFSanTests}
  )

set(CustomMainTests
  UserSuppliedFuzzerTest
  )

set(TestBinaries)

foreach(Test ${Tests})
  add_executable(LLVMFuzzer-${Test}
    ${Test}.cpp
    )
  target_link_libraries(LLVMFuzzer-${Test}
    LLVMFuzzer
    )
  set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test})
endforeach()

foreach(Test ${CustomMainTests})
  add_executable(LLVMFuzzer-${Test}
    ${Test}.cpp
    )
  target_link_libraries(LLVMFuzzer-${Test}
    LLVMFuzzerNoMain
    )
  set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test})
endforeach()

configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
  )

configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/unit/lit.site.cfg.in
  ${CMAKE_CURRENT_BINARY_DIR}/unit/lit.site.cfg
  )

include_directories(..)
include_directories(${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest/include)

add_executable(LLVMFuzzer-Unittest
  FuzzerUnittest.cpp
  $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
  )

target_link_libraries(LLVMFuzzer-Unittest
  gtest
  gtest_main
  )

set(TestBinaries ${TestBinaries} LLVMFuzzer-Unittest)

add_subdirectory(dfsan)

foreach(Test ${DFSanTests})
  set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test}-DFSan)
endforeach()

set_target_properties(${TestBinaries}
  PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  )

add_lit_testsuite(check-fuzzer "Running Fuzzer tests"
    ${CMAKE_CURRENT_BINARY_DIR}
    DEPENDS ${TestBinaries} FileCheck not
    )
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/FullCoverageSetTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "FUZZER". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { int bits = 0; if (Size > 0 && Data[0] == 'F') bits |= 1; if (Size > 1 && Data[1] == 'U') bits |= 2; if (Size > 2 && Data[2] == 'Z') bits |= 4; if (Size > 3 && Data[3] == 'Z') bits |= 8; if (Size > 4 && Data[4] == 'E') bits |= 16; if (Size > 5 && Data[5] == 'R') bits |= 32; if (bits == 63) { std::cerr << "BINGO!\n"; exit(1); } }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/InfiniteTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "Hi!". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> static volatile int Sink; static volatile int One = 1; extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { if (Size > 0 && Data[0] == 'H') { Sink = 1; if (Size > 1 && Data[1] == 'i') { Sink = 2; if (Size > 2 && Data[2] == '!') { Sink = 2; while (One) ; } } } }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/DFSanMemcmpTest.cpp
// Simple test for a fuzzer. The fuzzer must find a particular string.
#include <cstring>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  if (Size >= 8 && memcmp(Data, "01234567", 8) == 0) {
    fprintf(stderr, "BINGO\n");
    exit(1);
  }
}
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/NullDerefTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "Hi!". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> static volatile int Sink; static volatile int *Null = 0; extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { if (Size > 0 && Data[0] == 'H') { Sink = 1; if (Size > 1 && Data[1] == 'i') { Sink = 2; if (Size > 2 && Data[2] == '!') { std::cout << "Found the target, dereferencing NULL\n"; *Null = 1; } } } }
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/fuzzer.test
CHECK: BINGO
RUN: ./LLVMFuzzer-SimpleTest 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-InfiniteTest -timeout=2 2>&1 | FileCheck %s --check-prefix=InfiniteTest
InfiniteTest: ALARM: working on the last Unit for
InfiniteTest: CRASHED; file written to timeout
RUN: not ./LLVMFuzzer-TimeoutTest -timeout=5 2>&1 | FileCheck %s --check-prefix=TimeoutTest
TimeoutTest: ALARM: working on the last Unit for
TimeoutTest: CRASHED; file written to timeout
RUN: not ./LLVMFuzzer-NullDerefTest 2>&1 | FileCheck %s --check-prefix=NullDerefTest
NullDerefTest: CRASHED; file written to crash-
RUN: not ./LLVMFuzzer-FullCoverageSetTest -timeout=15 -seed=1 -mutate_depth=2 -use_full_coverage_set=1 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-FourIndependentBranchesTest -timeout=15 -seed=1 -use_full_coverage_set=1 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-CounterTest -use_counters=1 -max_len=6 -seed=1 -timeout=15 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-DFSanSimpleCmpTest-DFSan -use_traces=1 -seed=1 -runs=1000000 -timeout=5 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-DFSanSimpleCmpTest -use_traces=1 -seed=1 -runs=1000000 -timeout=5 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-DFSanMemcmpTest-DFSan -use_traces=1 -seed=1 -runs=100 -timeout=5 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-CxxTokensTest -seed=1 -timeout=15 -tokens=%S/../cxx_fuzzer_tokens.txt 2>&1 | FileCheck %s
RUN: not ./LLVMFuzzer-UserSuppliedFuzzerTest -seed=1 -timeout=15 2>&1 | FileCheck %s
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/DFSanSimpleCmpTest.cpp
// Simple test for a fuzzer. The fuzzer must find several narrow ranges.
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <cstdio>

extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  if (Size < 14) return;
  uint64_t x = 0;
  int64_t y = 0;
  int z = 0;
  unsigned short a = 0;
  memcpy(&x, Data, 8);
  memcpy(&y, Data + Size - 8, 8);
  memcpy(&z, Data + Size / 2, sizeof(z));
  memcpy(&a, Data + Size / 2 + 4, sizeof(a));

  if (x > 1234567890 && x < 1234567895 &&
      y >= 987654321 && y <= 987654325 &&
      z < -10000 && z >= -10005 && z != -10003 &&
      a == 4242) {
    fprintf(stderr,
            "BINGO; Found the target: size %zd (%zd, %zd, %d, %d), exiting.\n",
            Size, x, y, z, a);
    exit(1);
  }
}
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/CxxTokensTest.cpp
// Simple test for a fuzzer. The fuzzer must find a sequence of C++ tokens.
#include <cstdint>
#include <cstdlib>
#include <cstddef>
#include <cstring>
#include <iostream>

static void Found() {
  std::cout << "BINGO; Found the target, exiting\n";
  exit(1);
}

extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  // looking for "thread_local unsigned A;"
  if (Size < 24) return;
  if (0 == memcmp(&Data[0], "thread_local", 12))
    if (Data[12] == ' ')
      if (0 == memcmp(&Data[13], "unsigned", 8))
        if (Data[21] == ' ')
          if (Data[22] == 'A')
            if (Data[23] == ';')
              Found();
}
0
repos/DirectXShaderCompiler/lib/Fuzzer
repos/DirectXShaderCompiler/lib/Fuzzer/test/SimpleTest.cpp
// Simple test for a fuzzer. The fuzzer must find the string "Hi!". #include <cstdint> #include <cstdlib> #include <cstddef> #include <iostream> static volatile int Sink; extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { if (Size > 0 && Data[0] == 'H') { Sink = 1; if (Size > 1 && Data[1] == 'i') { Sink = 2; if (Size > 2 && Data[2] == '!') { std::cout << "BINGO; Found the target, exiting\n"; exit(0); } } } }
0
repos/DirectXShaderCompiler/lib/Fuzzer/test
repos/DirectXShaderCompiler/lib/Fuzzer/test/unit/lit.site.cfg.in
config.test_exec_root = "@CMAKE_CURRENT_BINARY_DIR@"
lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/unit/lit.cfg")
0
repos/DirectXShaderCompiler/lib/Fuzzer/test
repos/DirectXShaderCompiler/lib/Fuzzer/test/dfsan/CMakeLists.txt
# These tests depend on both coverage and dfsan instrumentation.

set(CMAKE_CXX_FLAGS_RELEASE
  "${LIBFUZZER_FLAGS_BASE} -O0 -fno-sanitize=all -fsanitize=dataflow")

foreach(Test ${DFSanTests})
  add_executable(LLVMFuzzer-${Test}-DFSan
    ../${Test}.cpp
    )
  target_link_libraries(LLVMFuzzer-${Test}-DFSan
    LLVMFuzzer
    )
endforeach()
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Bitcode/CMakeLists.txt
add_subdirectory(Reader)
add_subdirectory(Writer)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Bitcode/LLVMBuild.txt
;===- ./lib/Bitcode/LLVMBuild.txt ------------------------------*- Conf -*--===;
;
;                     The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[common]
subdirectories = Reader Writer

[component_0]
type = Group
name = Bitcode
parent = Libraries
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Bitcode/module.modulemap
module Bitcode {
  requires cplusplus
  umbrella "."
  module * { export * }
}
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/BitWriter.cpp
//===-- BitWriter.cpp -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm-c/BitWriter.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

/*===-- Operations on modules ---------------------------------------------===*/

int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) {
  std::error_code EC;
  raw_fd_ostream OS(Path, EC, sys::fs::F_None);

  if (EC)
    return -1;

  WriteBitcodeToFile(unwrap(M), OS);
  return 0;
}

int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
                         int Unbuffered) {
  raw_fd_ostream OS(FD, ShouldClose, Unbuffered);

  WriteBitcodeToFile(unwrap(M), OS);
  return 0;
}

int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int FileHandle) {
  return LLVMWriteBitcodeToFD(M, FileHandle, true, false);
}

LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M) {
  std::string Data;
  raw_string_ostream OS(Data);

  WriteBitcodeToFile(unwrap(M), OS);
  return wrap(MemoryBuffer::getMemBufferCopy(OS.str()).release());
}
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/ValueEnumerator.h
//===-- Bitcode/Writer/ValueEnumerator.h - Number values --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This class gives values and types Unique ID's. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H #define LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/UniqueVector.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/UseListOrder.h" #include <vector> namespace llvm { class Type; class Value; class Instruction; class BasicBlock; class Comdat; class Function; class Module; class Metadata; class LocalAsMetadata; class MDNode; class NamedMDNode; class AttributeSet; class ValueSymbolTable; class MDSymbolTable; class raw_ostream; class ValueEnumerator { public: typedef std::vector<Type*> TypeList; // For each value, we remember its Value* and occurrence frequency. typedef std::vector<std::pair<const Value*, unsigned> > ValueList; UseListOrderStack UseListOrders; private: typedef DenseMap<Type*, unsigned> TypeMapType; TypeMapType TypeMap; TypeList Types; typedef DenseMap<const Value*, unsigned> ValueMapType; ValueMapType ValueMap; ValueList Values; typedef UniqueVector<const Comdat *> ComdatSetType; ComdatSetType Comdats; std::vector<const Metadata *> MDs; SmallVector<const LocalAsMetadata *, 8> FunctionLocalMDs; typedef DenseMap<const Metadata *, unsigned> MetadataMapType; MetadataMapType MDValueMap; bool HasMDString; bool HasDILocation; bool HasGenericDINode; bool ShouldPreserveUseListOrder; typedef DenseMap<AttributeSet, unsigned> AttributeGroupMapType; AttributeGroupMapType AttributeGroupMap; std::vector<AttributeSet> AttributeGroups; typedef DenseMap<AttributeSet, unsigned> AttributeMapType; AttributeMapType AttributeMap; std::vector<AttributeSet> Attribute; /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by /// the "getGlobalBasicBlockID" method. mutable DenseMap<const BasicBlock*, unsigned> GlobalBasicBlockIDs; typedef DenseMap<const Instruction*, unsigned> InstructionMapType; InstructionMapType InstructionMap; unsigned InstructionCount; /// BasicBlocks - This contains all the basic blocks for the currently /// incorporated function. Their reverse mapping is stored in ValueMap. std::vector<const BasicBlock*> BasicBlocks; /// When a function is incorporated, this is the size of the Values list /// before incorporation. unsigned NumModuleValues; /// When a function is incorporated, this is the size of the MDValues list /// before incorporation. 
unsigned NumModuleMDs; unsigned FirstFuncConstantID; unsigned FirstInstID; ValueEnumerator(const ValueEnumerator &) = delete; void operator=(const ValueEnumerator &) = delete; public: ValueEnumerator(const Module &M, bool ShouldPreserveUseListOrder); void dump() const; void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const; void print(raw_ostream &OS, const MetadataMapType &Map, const char *Name) const; unsigned getValueID(const Value *V) const; unsigned getMetadataID(const Metadata *MD) const { auto ID = getMetadataOrNullID(MD); assert(ID != 0 && "Metadata not in slotcalculator!"); return ID - 1; } unsigned getMetadataOrNullID(const Metadata *MD) const { return MDValueMap.lookup(MD); } bool hasMDString() const { return HasMDString; } bool hasDILocation() const { return HasDILocation; } bool hasGenericDINode() const { return HasGenericDINode; } bool shouldPreserveUseListOrder() const { return ShouldPreserveUseListOrder; } unsigned getTypeID(Type *T) const { TypeMapType::const_iterator I = TypeMap.find(T); assert(I != TypeMap.end() && "Type not in ValueEnumerator!"); return I->second-1; } unsigned getInstructionID(const Instruction *I) const; void setInstructionID(const Instruction *I); unsigned getAttributeID(AttributeSet PAL) const { if (PAL.isEmpty()) return 0; // Null maps to zero. AttributeMapType::const_iterator I = AttributeMap.find(PAL); assert(I != AttributeMap.end() && "Attribute not in ValueEnumerator!"); return I->second; } unsigned getAttributeGroupID(AttributeSet PAL) const { if (PAL.isEmpty()) return 0; // Null maps to zero. AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(PAL); assert(I != AttributeGroupMap.end() && "Attribute not in ValueEnumerator!"); return I->second; } /// getFunctionConstantRange - Return the range of values that corresponds to /// function-local constants. void getFunctionConstantRange(unsigned &Start, unsigned &End) const { Start = FirstFuncConstantID; End = FirstInstID; } const ValueList &getValues() const { return Values; } const std::vector<const Metadata *> &getMDs() const { return MDs; } const SmallVectorImpl<const LocalAsMetadata *> &getFunctionLocalMDs() const { return FunctionLocalMDs; } const TypeList &getTypes() const { return Types; } const std::vector<const BasicBlock*> &getBasicBlocks() const { return BasicBlocks; } const std::vector<AttributeSet> &getAttributes() const { return Attribute; } const std::vector<AttributeSet> &getAttributeGroups() const { return AttributeGroups; } const ComdatSetType &getComdats() const { return Comdats; } unsigned getComdatID(const Comdat *C) const; /// getGlobalBasicBlockID - This returns the function-specific ID for the /// specified basic block. This is relatively expensive information, so it /// should only be used by rare constructs such as address-of-label. unsigned getGlobalBasicBlockID(const BasicBlock *BB) const; /// incorporateFunction/purgeFunction - If you'd like to deal with a function, /// use these two methods to get its data into the ValueEnumerator! 
/// void incorporateFunction(const Function &F); void purgeFunction(); uint64_t computeBitsRequiredForTypeIndicies() const; private: void OptimizeConstants(unsigned CstStart, unsigned CstEnd); void EnumerateMDNodeOperands(const MDNode *N); void EnumerateMetadata(const Metadata *MD); void EnumerateFunctionLocalMetadata(const LocalAsMetadata *Local); void EnumerateNamedMDNode(const NamedMDNode *NMD); void EnumerateValue(const Value *V); void EnumerateType(Type *T); void EnumerateOperandType(const Value *V); void EnumerateAttributes(AttributeSet PAL); void EnumerateValueSymbolTable(const ValueSymbolTable &ST); void EnumerateNamedMetadata(const Module &M); }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/BitcodeWriter.cpp
//===--- Bitcode/Writer/BitcodeWriter.cpp - Bitcode Writer ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Bitcode writer implementation. // //===----------------------------------------------------------------------===// #include "llvm/Bitcode/ReaderWriter.h" #include "ValueEnumerator.h" #include "llvm/ADT/Triple.h" #include "llvm/Bitcode/BitstreamWriter.h" #include "llvm/Bitcode/LLVMBitCodes.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/UseListOrder.h" #include "llvm/IR/ValueSymbolTable.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/Program.h" #include "llvm/Support/raw_ostream.h" #include <cctype> #include <map> using namespace llvm; /// These are manifest constants used by the bitcode writer. They do not need to /// be kept in sync with the reader, but need to be consistent within this file. enum { // VALUE_SYMTAB_BLOCK abbrev id's. VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV, VST_ENTRY_7_ABBREV, VST_ENTRY_6_ABBREV, VST_BBENTRY_6_ABBREV, // CONSTANTS_BLOCK abbrev id's. CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV, CONSTANTS_INTEGER_ABBREV, CONSTANTS_CE_CAST_Abbrev, CONSTANTS_NULL_Abbrev, // FUNCTION_BLOCK abbrev id's. FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV, FUNCTION_INST_BINOP_ABBREV, FUNCTION_INST_BINOP_FLAGS_ABBREV, FUNCTION_INST_CAST_ABBREV, FUNCTION_INST_RET_VOID_ABBREV, FUNCTION_INST_RET_VAL_ABBREV, FUNCTION_INST_UNREACHABLE_ABBREV, FUNCTION_INST_GEP_ABBREV, }; static unsigned GetEncodedCastOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("Unknown cast instruction!"); case Instruction::Trunc : return bitc::CAST_TRUNC; case Instruction::ZExt : return bitc::CAST_ZEXT; case Instruction::SExt : return bitc::CAST_SEXT; case Instruction::FPToUI : return bitc::CAST_FPTOUI; case Instruction::FPToSI : return bitc::CAST_FPTOSI; case Instruction::UIToFP : return bitc::CAST_UITOFP; case Instruction::SIToFP : return bitc::CAST_SITOFP; case Instruction::FPTrunc : return bitc::CAST_FPTRUNC; case Instruction::FPExt : return bitc::CAST_FPEXT; case Instruction::PtrToInt: return bitc::CAST_PTRTOINT; case Instruction::IntToPtr: return bitc::CAST_INTTOPTR; case Instruction::BitCast : return bitc::CAST_BITCAST; case Instruction::AddrSpaceCast: return bitc::CAST_ADDRSPACECAST; } } static unsigned GetEncodedBinaryOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("Unknown binary instruction!"); case Instruction::Add: case Instruction::FAdd: return bitc::BINOP_ADD; case Instruction::Sub: case Instruction::FSub: return bitc::BINOP_SUB; case Instruction::Mul: case Instruction::FMul: return bitc::BINOP_MUL; case Instruction::UDiv: return bitc::BINOP_UDIV; case Instruction::FDiv: case Instruction::SDiv: return bitc::BINOP_SDIV; case Instruction::URem: return bitc::BINOP_UREM; case Instruction::FRem: case Instruction::SRem: return bitc::BINOP_SREM; case Instruction::Shl: return bitc::BINOP_SHL; case Instruction::LShr: return bitc::BINOP_LSHR; case Instruction::AShr: return bitc::BINOP_ASHR; case Instruction::And: return 
bitc::BINOP_AND; case Instruction::Or: return bitc::BINOP_OR; case Instruction::Xor: return bitc::BINOP_XOR; } } static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) { switch (Op) { default: llvm_unreachable("Unknown RMW operation!"); case AtomicRMWInst::Xchg: return bitc::RMW_XCHG; case AtomicRMWInst::Add: return bitc::RMW_ADD; case AtomicRMWInst::Sub: return bitc::RMW_SUB; case AtomicRMWInst::And: return bitc::RMW_AND; case AtomicRMWInst::Nand: return bitc::RMW_NAND; case AtomicRMWInst::Or: return bitc::RMW_OR; case AtomicRMWInst::Xor: return bitc::RMW_XOR; case AtomicRMWInst::Max: return bitc::RMW_MAX; case AtomicRMWInst::Min: return bitc::RMW_MIN; case AtomicRMWInst::UMax: return bitc::RMW_UMAX; case AtomicRMWInst::UMin: return bitc::RMW_UMIN; } } static unsigned GetEncodedOrdering(AtomicOrdering Ordering) { switch (Ordering) { case NotAtomic: return bitc::ORDERING_NOTATOMIC; case Unordered: return bitc::ORDERING_UNORDERED; case Monotonic: return bitc::ORDERING_MONOTONIC; case Acquire: return bitc::ORDERING_ACQUIRE; case Release: return bitc::ORDERING_RELEASE; case AcquireRelease: return bitc::ORDERING_ACQREL; case SequentiallyConsistent: return bitc::ORDERING_SEQCST; } llvm_unreachable("Invalid ordering"); } static unsigned GetEncodedSynchScope(SynchronizationScope SynchScope) { switch (SynchScope) { case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD; case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD; } llvm_unreachable("Invalid synch scope"); } static void WriteStringRecord(unsigned Code, StringRef Str, unsigned AbbrevToUse, BitstreamWriter &Stream) { SmallVector<unsigned, 64> Vals; // Code: [strchar x N] for (unsigned i = 0, e = Str.size(); i != e; ++i) { if (AbbrevToUse && !BitCodeAbbrevOp::isChar6(Str[i])) AbbrevToUse = 0; Vals.push_back(Str[i]); } // Emit the finished record. 
Stream.EmitRecord(Code, Vals, AbbrevToUse); } static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) { switch (Kind) { case Attribute::Alignment: return bitc::ATTR_KIND_ALIGNMENT; case Attribute::AlwaysInline: return bitc::ATTR_KIND_ALWAYS_INLINE; case Attribute::ArgMemOnly: return bitc::ATTR_KIND_ARGMEMONLY; case Attribute::Builtin: return bitc::ATTR_KIND_BUILTIN; case Attribute::ByVal: return bitc::ATTR_KIND_BY_VAL; case Attribute::Convergent: return bitc::ATTR_KIND_CONVERGENT; case Attribute::InAlloca: return bitc::ATTR_KIND_IN_ALLOCA; case Attribute::Cold: return bitc::ATTR_KIND_COLD; case Attribute::InlineHint: return bitc::ATTR_KIND_INLINE_HINT; case Attribute::InReg: return bitc::ATTR_KIND_IN_REG; case Attribute::JumpTable: return bitc::ATTR_KIND_JUMP_TABLE; case Attribute::MinSize: return bitc::ATTR_KIND_MIN_SIZE; case Attribute::Naked: return bitc::ATTR_KIND_NAKED; case Attribute::Nest: return bitc::ATTR_KIND_NEST; case Attribute::NoAlias: return bitc::ATTR_KIND_NO_ALIAS; case Attribute::NoBuiltin: return bitc::ATTR_KIND_NO_BUILTIN; case Attribute::NoCapture: return bitc::ATTR_KIND_NO_CAPTURE; case Attribute::NoDuplicate: return bitc::ATTR_KIND_NO_DUPLICATE; case Attribute::NoImplicitFloat: return bitc::ATTR_KIND_NO_IMPLICIT_FLOAT; case Attribute::NoInline: return bitc::ATTR_KIND_NO_INLINE; case Attribute::NonLazyBind: return bitc::ATTR_KIND_NON_LAZY_BIND; case Attribute::NonNull: return bitc::ATTR_KIND_NON_NULL; case Attribute::Dereferenceable: return bitc::ATTR_KIND_DEREFERENCEABLE; case Attribute::DereferenceableOrNull: return bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL; case Attribute::NoRedZone: return bitc::ATTR_KIND_NO_RED_ZONE; case Attribute::NoReturn: return bitc::ATTR_KIND_NO_RETURN; case Attribute::NoUnwind: return bitc::ATTR_KIND_NO_UNWIND; case Attribute::OptimizeForSize: return bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE; case Attribute::OptimizeNone: return bitc::ATTR_KIND_OPTIMIZE_NONE; case Attribute::ReadNone: return bitc::ATTR_KIND_READ_NONE; case Attribute::ReadOnly: return bitc::ATTR_KIND_READ_ONLY; case Attribute::Returned: return bitc::ATTR_KIND_RETURNED; case Attribute::ReturnsTwice: return bitc::ATTR_KIND_RETURNS_TWICE; case Attribute::SExt: return bitc::ATTR_KIND_S_EXT; case Attribute::StackAlignment: return bitc::ATTR_KIND_STACK_ALIGNMENT; case Attribute::StackProtect: return bitc::ATTR_KIND_STACK_PROTECT; case Attribute::StackProtectReq: return bitc::ATTR_KIND_STACK_PROTECT_REQ; case Attribute::StackProtectStrong: return bitc::ATTR_KIND_STACK_PROTECT_STRONG; case Attribute::SafeStack: return bitc::ATTR_KIND_SAFESTACK; case Attribute::StructRet: return bitc::ATTR_KIND_STRUCT_RET; case Attribute::SanitizeAddress: return bitc::ATTR_KIND_SANITIZE_ADDRESS; case Attribute::SanitizeThread: return bitc::ATTR_KIND_SANITIZE_THREAD; case Attribute::SanitizeMemory: return bitc::ATTR_KIND_SANITIZE_MEMORY; case Attribute::UWTable: return bitc::ATTR_KIND_UW_TABLE; case Attribute::ZExt: return bitc::ATTR_KIND_Z_EXT; case Attribute::EndAttrKinds: llvm_unreachable("Can not encode end-attribute kinds marker."); case Attribute::None: llvm_unreachable("Can not encode none-attribute."); } llvm_unreachable("Trying to encode unknown attribute"); } static void WriteAttributeGroupTable(const ValueEnumerator &VE, BitstreamWriter &Stream) { const std::vector<AttributeSet> &AttrGrps = VE.getAttributeGroups(); if (AttrGrps.empty()) return; Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3); SmallVector<uint64_t, 64> Record; for (unsigned i = 0, e = AttrGrps.size(); i != e; ++i) { 
AttributeSet AS = AttrGrps[i]; for (unsigned i = 0, e = AS.getNumSlots(); i != e; ++i) { AttributeSet A = AS.getSlotAttributes(i); Record.push_back(VE.getAttributeGroupID(A)); Record.push_back(AS.getSlotIndex(i)); for (AttributeSet::iterator I = AS.begin(0), E = AS.end(0); I != E; ++I) { Attribute Attr = *I; if (Attr.isEnumAttribute()) { Record.push_back(0); Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum())); } else if (Attr.isIntAttribute()) { Record.push_back(1); Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum())); Record.push_back(Attr.getValueAsInt()); } else { StringRef Kind = Attr.getKindAsString(); StringRef Val = Attr.getValueAsString(); Record.push_back(Val.empty() ? 3 : 4); Record.append(Kind.begin(), Kind.end()); Record.push_back(0); if (!Val.empty()) { Record.append(Val.begin(), Val.end()); Record.push_back(0); } } } Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record); Record.clear(); } } Stream.ExitBlock(); } static void WriteAttributeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) { const std::vector<AttributeSet> &Attrs = VE.getAttributes(); if (Attrs.empty()) return; Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3); SmallVector<uint64_t, 64> Record; for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { const AttributeSet &A = Attrs[i]; for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) Record.push_back(VE.getAttributeGroupID(A.getSlotAttributes(i))); Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record); Record.clear(); } Stream.ExitBlock(); } /// WriteTypeTable - Write out the type table for a module. static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) { const ValueEnumerator::TypeList &TypeList = VE.getTypes(); Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */); SmallVector<uint64_t, 64> TypeVals; uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies(); // Abbrev for TYPE_CODE_POINTER. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0 unsigned PtrAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for TYPE_CODE_FUNCTION. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isvararg Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); unsigned FunctionAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for TYPE_CODE_STRUCT_ANON. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); unsigned StructAnonAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for TYPE_CODE_STRUCT_NAME. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAME)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); unsigned StructNameAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for TYPE_CODE_STRUCT_NAMED. 
Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); unsigned StructNamedAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for TYPE_CODE_ARRAY. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // size Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); unsigned ArrayAbbrev = Stream.EmitAbbrev(Abbv.get()); // Emit an entry count so the reader can reserve space. TypeVals.push_back(TypeList.size()); Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals); TypeVals.clear(); // Loop over all of the types, emitting each in turn. for (unsigned i = 0, e = TypeList.size(); i != e; ++i) { Type *T = TypeList[i]; int AbbrevToUse = 0; unsigned Code = 0; switch (T->getTypeID()) { case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break; case Type::HalfTyID: Code = bitc::TYPE_CODE_HALF; break; case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break; case Type::DoubleTyID: Code = bitc::TYPE_CODE_DOUBLE; break; case Type::X86_FP80TyID: Code = bitc::TYPE_CODE_X86_FP80; break; case Type::FP128TyID: Code = bitc::TYPE_CODE_FP128; break; case Type::PPC_FP128TyID: Code = bitc::TYPE_CODE_PPC_FP128; break; case Type::LabelTyID: Code = bitc::TYPE_CODE_LABEL; break; case Type::MetadataTyID: Code = bitc::TYPE_CODE_METADATA; break; case Type::X86_MMXTyID: Code = bitc::TYPE_CODE_X86_MMX; break; case Type::IntegerTyID: // INTEGER: [width] Code = bitc::TYPE_CODE_INTEGER; TypeVals.push_back(cast<IntegerType>(T)->getBitWidth()); break; case Type::PointerTyID: { PointerType *PTy = cast<PointerType>(T); // POINTER: [pointee type, address space] Code = bitc::TYPE_CODE_POINTER; TypeVals.push_back(VE.getTypeID(PTy->getElementType())); unsigned AddressSpace = PTy->getAddressSpace(); TypeVals.push_back(AddressSpace); if (AddressSpace == 0) AbbrevToUse = PtrAbbrev; break; } case Type::FunctionTyID: { FunctionType *FT = cast<FunctionType>(T); // FUNCTION: [isvararg, retty, paramty x N] Code = bitc::TYPE_CODE_FUNCTION; TypeVals.push_back(FT->isVarArg()); TypeVals.push_back(VE.getTypeID(FT->getReturnType())); for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) TypeVals.push_back(VE.getTypeID(FT->getParamType(i))); AbbrevToUse = FunctionAbbrev; break; } case Type::StructTyID: { StructType *ST = cast<StructType>(T); // STRUCT: [ispacked, eltty x N] TypeVals.push_back(ST->isPacked()); // Output all of the element types. for (StructType::element_iterator I = ST->element_begin(), E = ST->element_end(); I != E; ++I) TypeVals.push_back(VE.getTypeID(*I)); if (ST->isLiteral()) { Code = bitc::TYPE_CODE_STRUCT_ANON; AbbrevToUse = StructAnonAbbrev; } else { if (ST->isOpaque()) { Code = bitc::TYPE_CODE_OPAQUE; } else { Code = bitc::TYPE_CODE_STRUCT_NAMED; AbbrevToUse = StructNamedAbbrev; } // Emit the name if it is present. 
if (!ST->getName().empty()) WriteStringRecord(bitc::TYPE_CODE_STRUCT_NAME, ST->getName(), StructNameAbbrev, Stream); } break; } case Type::ArrayTyID: { ArrayType *AT = cast<ArrayType>(T); // ARRAY: [numelts, eltty] Code = bitc::TYPE_CODE_ARRAY; TypeVals.push_back(AT->getNumElements()); TypeVals.push_back(VE.getTypeID(AT->getElementType())); AbbrevToUse = ArrayAbbrev; break; } case Type::VectorTyID: { VectorType *VT = cast<VectorType>(T); // VECTOR [numelts, eltty] Code = bitc::TYPE_CODE_VECTOR; TypeVals.push_back(VT->getNumElements()); TypeVals.push_back(VE.getTypeID(VT->getElementType())); break; } } // Emit the finished record. Stream.EmitRecord(Code, TypeVals, AbbrevToUse); TypeVals.clear(); } Stream.ExitBlock(); } static unsigned getEncodedLinkage(const GlobalValue &GV) { switch (GV.getLinkage()) { case GlobalValue::ExternalLinkage: return 0; case GlobalValue::WeakAnyLinkage: return 16; case GlobalValue::AppendingLinkage: return 2; case GlobalValue::InternalLinkage: return 3; case GlobalValue::LinkOnceAnyLinkage: return 18; case GlobalValue::ExternalWeakLinkage: return 7; case GlobalValue::CommonLinkage: return 8; case GlobalValue::PrivateLinkage: return 9; case GlobalValue::WeakODRLinkage: return 17; case GlobalValue::LinkOnceODRLinkage: return 19; case GlobalValue::AvailableExternallyLinkage: return 12; } llvm_unreachable("Invalid linkage"); } static unsigned getEncodedVisibility(const GlobalValue &GV) { switch (GV.getVisibility()) { case GlobalValue::DefaultVisibility: return 0; case GlobalValue::HiddenVisibility: return 1; case GlobalValue::ProtectedVisibility: return 2; } llvm_unreachable("Invalid visibility"); } static unsigned getEncodedDLLStorageClass(const GlobalValue &GV) { switch (GV.getDLLStorageClass()) { case GlobalValue::DefaultStorageClass: return 0; case GlobalValue::DLLImportStorageClass: return 1; case GlobalValue::DLLExportStorageClass: return 2; } llvm_unreachable("Invalid DLL storage class"); } static unsigned getEncodedThreadLocalMode(const GlobalValue &GV) { switch (GV.getThreadLocalMode()) { case GlobalVariable::NotThreadLocal: return 0; case GlobalVariable::GeneralDynamicTLSModel: return 1; case GlobalVariable::LocalDynamicTLSModel: return 2; case GlobalVariable::InitialExecTLSModel: return 3; case GlobalVariable::LocalExecTLSModel: return 4; } llvm_unreachable("Invalid TLS model"); } static unsigned getEncodedComdatSelectionKind(const Comdat &C) { switch (C.getSelectionKind()) { case Comdat::Any: return bitc::COMDAT_SELECTION_KIND_ANY; case Comdat::ExactMatch: return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH; case Comdat::Largest: return bitc::COMDAT_SELECTION_KIND_LARGEST; case Comdat::NoDuplicates: return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES; case Comdat::SameSize: return bitc::COMDAT_SELECTION_KIND_SAME_SIZE; } llvm_unreachable("Invalid selection kind"); } static void writeComdats(const ValueEnumerator &VE, BitstreamWriter &Stream) { SmallVector<uint16_t, 64> Vals; for (const Comdat *C : VE.getComdats()) { // COMDAT: [selection_kind, name] Vals.push_back(getEncodedComdatSelectionKind(*C)); size_t Size = C->getName().size(); assert(isUInt<16>(Size)); Vals.push_back(Size); for (char Chr : C->getName()) Vals.push_back((unsigned char)Chr); Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0); Vals.clear(); } } // Emit top-level description of module, including target triple, inline asm, // descriptors for global variables, and function prototype info. 
static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE, BitstreamWriter &Stream) { // Emit various pieces of data attached to a module. if (!M->getTargetTriple().empty()) WriteStringRecord(bitc::MODULE_CODE_TRIPLE, M->getTargetTriple(), 0/*TODO*/, Stream); const std::string &DL = M->getDataLayoutStr(); if (!DL.empty()) WriteStringRecord(bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/, Stream); if (!M->getModuleInlineAsm().empty()) WriteStringRecord(bitc::MODULE_CODE_ASM, M->getModuleInlineAsm(), 0/*TODO*/, Stream); // Emit information about sections and GC, computing how many there are. Also // compute the maximum alignment value. std::map<std::string, unsigned> SectionMap; std::map<std::string, unsigned> GCMap; unsigned MaxAlignment = 0; unsigned MaxGlobalType = 0; for (const GlobalValue &GV : M->globals()) { MaxAlignment = std::max(MaxAlignment, GV.getAlignment()); MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType())); if (GV.hasSection()) { // Give section names unique ID's. unsigned &Entry = SectionMap[GV.getSection()]; if (!Entry) { WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, GV.getSection(), 0/*TODO*/, Stream); Entry = SectionMap.size(); } } } for (const Function &F : *M) { MaxAlignment = std::max(MaxAlignment, F.getAlignment()); if (F.hasSection()) { // Give section names unique ID's. unsigned &Entry = SectionMap[F.getSection()]; if (!Entry) { WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, F.getSection(), 0/*TODO*/, Stream); Entry = SectionMap.size(); } } if (F.hasGC()) { // Same for GC names. unsigned &Entry = GCMap[F.getGC()]; if (!Entry) { WriteStringRecord(bitc::MODULE_CODE_GCNAME, F.getGC(), 0/*TODO*/, Stream); Entry = GCMap.size(); } } } // Emit abbrev for globals, now that we know # sections and max alignment. unsigned SimpleGVarAbbrev = 0; if (!M->global_empty()) { // Add an abbrev for common globals with no visibility or thread localness. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(MaxGlobalType+1))); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddrSpace << 2 //| explicitType << 1 //| constant Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Initializer. Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5)); // Linkage. if (MaxAlignment == 0) // Alignment. Abbv->Add(BitCodeAbbrevOp(0)); else { unsigned MaxEncAlignment = Log2_32(MaxAlignment)+1; Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(MaxEncAlignment+1))); } if (SectionMap.empty()) // Section. Abbv->Add(BitCodeAbbrevOp(0)); else Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(SectionMap.size()+1))); // Don't bother emitting vis + thread local. SimpleGVarAbbrev = Stream.EmitAbbrev(Abbv.get()); } // Emit the global variable information. SmallVector<unsigned, 64> Vals; for (const GlobalVariable &GV : M->globals()) { unsigned AbbrevToUse = 0; // GLOBALVAR: [type, isconst, initid, // linkage, alignment, section, visibility, threadlocal, // unnamed_addr, externally_initialized, dllstorageclass, // comdat] Vals.push_back(VE.getTypeID(GV.getValueType())); Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | (GV.isConstant() ? 1 : 0)); // HLSL Change - bitwise | was used with unsigned int and bool Vals.push_back(GV.isDeclaration() ? 0 : (VE.getValueID(GV.getInitializer()) + 1)); Vals.push_back(getEncodedLinkage(GV)); Vals.push_back(Log2_32(GV.getAlignment())+1); Vals.push_back(GV.hasSection() ? 
SectionMap[GV.getSection()] : 0); if (GV.isThreadLocal() || GV.getVisibility() != GlobalValue::DefaultVisibility || GV.hasUnnamedAddr() || GV.isExternallyInitialized() || GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass || GV.hasComdat()) { Vals.push_back(getEncodedVisibility(GV)); Vals.push_back(getEncodedThreadLocalMode(GV)); Vals.push_back(GV.hasUnnamedAddr()); Vals.push_back(GV.isExternallyInitialized()); Vals.push_back(getEncodedDLLStorageClass(GV)); Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0); } else { AbbrevToUse = SimpleGVarAbbrev; } Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse); Vals.clear(); } // Emit the function proto information. for (const Function &F : *M) { // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment, // section, visibility, gc, unnamed_addr, prologuedata, // dllstorageclass, comdat, prefixdata, personalityfn] Vals.push_back(VE.getTypeID(F.getFunctionType())); Vals.push_back(F.getCallingConv()); Vals.push_back(F.isDeclaration()); Vals.push_back(getEncodedLinkage(F)); Vals.push_back(VE.getAttributeID(F.getAttributes())); Vals.push_back(Log2_32(F.getAlignment())+1); Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0); Vals.push_back(getEncodedVisibility(F)); Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0); Vals.push_back(F.hasUnnamedAddr()); Vals.push_back(F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1) : 0); Vals.push_back(getEncodedDLLStorageClass(F)); Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0); Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1) : 0); Vals.push_back( F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0); unsigned AbbrevToUse = 0; Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse); Vals.clear(); } // Emit the alias information. for (const GlobalAlias &A : M->aliases()) { // ALIAS: [alias type, aliasee val#, linkage, visibility] Vals.push_back(VE.getTypeID(A.getType())); Vals.push_back(VE.getValueID(A.getAliasee())); Vals.push_back(getEncodedLinkage(A)); Vals.push_back(getEncodedVisibility(A)); Vals.push_back(getEncodedDLLStorageClass(A)); Vals.push_back(getEncodedThreadLocalMode(A)); Vals.push_back(A.hasUnnamedAddr()); unsigned AbbrevToUse = 0; Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse); Vals.clear(); } } static uint64_t GetOptimizationFlags(const Value *V) { uint64_t Flags = 0; if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V)) { if (OBO->hasNoSignedWrap()) Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP; if (OBO->hasNoUnsignedWrap()) Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP; } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) { if (PEO->isExact()) Flags |= 1 << bitc::PEO_EXACT; } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) { if (FPMO->hasUnsafeAlgebra()) Flags |= FastMathFlags::UnsafeAlgebra; if (FPMO->hasNoNaNs()) Flags |= FastMathFlags::NoNaNs; if (FPMO->hasNoInfs()) Flags |= FastMathFlags::NoInfs; if (FPMO->hasNoSignedZeros()) Flags |= FastMathFlags::NoSignedZeros; if (FPMO->hasAllowReciprocal()) Flags |= FastMathFlags::AllowReciprocal; } return Flags; } static void WriteValueAsMetadata(const ValueAsMetadata *MD, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record) { // Mimic an MDNode with a value as one operand. 
Value *V = MD->getValue(); Record.push_back(VE.getTypeID(V->getType())); Record.push_back(VE.getValueID(V)); Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0); Record.clear(); } static void WriteMDTuple(const MDTuple *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { Metadata *MD = N->getOperand(i); assert(!(MD && isa<LocalAsMetadata>(MD)) && "Unexpected function-local metadata"); Record.push_back(VE.getMetadataOrNullID(MD)); } Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE : bitc::METADATA_NODE, Record, Abbrev); Record.clear(); } static void WriteDILocation(const DILocation *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getLine()); Record.push_back(N->getColumn()); Record.push_back(VE.getMetadataID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getInlinedAt())); Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev); Record.clear(); } static void WriteGenericDINode(const GenericDINode *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(0); // Per-tag version field; unused for now. for (auto &I : N->operands()) Record.push_back(VE.getMetadataOrNullID(I)); Stream.EmitRecord(bitc::METADATA_GENERIC_DEBUG, Record, Abbrev); Record.clear(); } static uint64_t rotateSign(int64_t I) { uint64_t U = I; return I < 0 ? ~(U << 1) : U << 1; } static void WriteDISubrange(const DISubrange *N, const ValueEnumerator &, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getCount()); Record.push_back(rotateSign(N->getLowerBound())); Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev); Record.clear(); } static void WriteDIEnumerator(const DIEnumerator *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(rotateSign(N->getValue())); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev); Record.clear(); } static void WriteDIBasicType(const DIBasicType *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(N->getSizeInBits()); Record.push_back(N->getAlignInBits()); Record.push_back(N->getEncoding()); Stream.EmitRecord(bitc::METADATA_BASIC_TYPE, Record, Abbrev); Record.clear(); } static void WriteDIDerivedType(const DIDerivedType *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getBaseType())); Record.push_back(N->getSizeInBits()); Record.push_back(N->getAlignInBits()); Record.push_back(N->getOffsetInBits()); Record.push_back(N->getFlags()); 
Record.push_back(VE.getMetadataOrNullID(N->getExtraData())); Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev); Record.clear(); } static void WriteDICompositeType(const DICompositeType *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getBaseType())); Record.push_back(N->getSizeInBits()); Record.push_back(N->getAlignInBits()); Record.push_back(N->getOffsetInBits()); Record.push_back(N->getFlags()); Record.push_back(VE.getMetadataOrNullID(N->getElements().get())); Record.push_back(N->getRuntimeLang()); Record.push_back(VE.getMetadataOrNullID(N->getVTableHolder())); Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get())); Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier())); Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev); Record.clear(); } static void WriteDISubroutineType(const DISubroutineType *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getFlags()); Record.push_back(VE.getMetadataOrNullID(N->getTypeArray().get())); Stream.EmitRecord(bitc::METADATA_SUBROUTINE_TYPE, Record, Abbrev); Record.clear(); } static void WriteDIFile(const DIFile *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getRawFilename())); Record.push_back(VE.getMetadataOrNullID(N->getRawDirectory())); Stream.EmitRecord(bitc::METADATA_FILE, Record, Abbrev); Record.clear(); } static void WriteDICompileUnit(const DICompileUnit *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getSourceLanguage()); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(VE.getMetadataOrNullID(N->getRawProducer())); Record.push_back(N->isOptimized()); Record.push_back(VE.getMetadataOrNullID(N->getRawFlags())); Record.push_back(N->getRuntimeVersion()); Record.push_back(VE.getMetadataOrNullID(N->getRawSplitDebugFilename())); Record.push_back(N->getEmissionKind()); Record.push_back(VE.getMetadataOrNullID(N->getEnumTypes().get())); Record.push_back(VE.getMetadataOrNullID(N->getRetainedTypes().get())); Record.push_back(VE.getMetadataOrNullID(N->getSubprograms().get())); Record.push_back(VE.getMetadataOrNullID(N->getGlobalVariables().get())); Record.push_back(VE.getMetadataOrNullID(N->getImportedEntities().get())); Record.push_back(N->getDWOId()); Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev); Record.clear(); } static void WriteDISubprogram(const DISubprogram *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getType())); 
Record.push_back(N->isLocalToUnit()); Record.push_back(N->isDefinition()); Record.push_back(N->getScopeLine()); Record.push_back(VE.getMetadataOrNullID(N->getContainingType())); Record.push_back(N->getVirtuality()); Record.push_back(N->getVirtualIndex()); Record.push_back(N->getFlags()); Record.push_back(N->isOptimized()); Record.push_back(VE.getMetadataOrNullID(N->getRawFunction())); Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get())); Record.push_back(VE.getMetadataOrNullID(N->getDeclaration())); Record.push_back(VE.getMetadataOrNullID(N->getVariables().get())); Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev); Record.clear(); } static void WriteDILexicalBlock(const DILexicalBlock *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(N->getColumn()); Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK, Record, Abbrev); Record.clear(); } static void WriteDILexicalBlockFile(const DILexicalBlockFile *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getDiscriminator()); Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK_FILE, Record, Abbrev); Record.clear(); } static void WriteDINamespace(const DINamespace *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(N->getLine()); Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev); Record.clear(); } static void WriteDIModule(const DIModule *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); for (auto &I : N->operands()) Record.push_back(VE.getMetadataOrNullID(I)); Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev); Record.clear(); } static void WriteDITemplateTypeParameter(const DITemplateTypeParameter *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getType())); Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev); Record.clear(); } static void WriteDITemplateValueParameter(const DITemplateValueParameter *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getType())); Record.push_back(VE.getMetadataOrNullID(N->getValue())); Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev); Record.clear(); } static void WriteDIGlobalVariable(const DIGlobalVariable *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); 
Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getType())); Record.push_back(N->isLocalToUnit()); Record.push_back(N->isDefinition()); Record.push_back(VE.getMetadataOrNullID(N->getRawVariable())); Record.push_back(VE.getMetadataOrNullID(N->getStaticDataMemberDeclaration())); Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR, Record, Abbrev); Record.clear(); } static void WriteDILocalVariable(const DILocalVariable *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getType())); Record.push_back(N->getArg()); Record.push_back(N->getFlags()); Stream.EmitRecord(bitc::METADATA_LOCAL_VAR, Record, Abbrev); Record.clear(); } static void WriteDIExpression(const DIExpression *N, const ValueEnumerator &, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.reserve(N->getElements().size() + 1); Record.push_back(N->isDistinct()); Record.append(N->elements_begin(), N->elements_end()); Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev); Record.clear(); } static void WriteDIObjCProperty(const DIObjCProperty *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Record.push_back(VE.getMetadataOrNullID(N->getFile())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getRawSetterName())); Record.push_back(VE.getMetadataOrNullID(N->getRawGetterName())); Record.push_back(N->getAttributes()); Record.push_back(VE.getMetadataOrNullID(N->getType())); Stream.EmitRecord(bitc::METADATA_OBJC_PROPERTY, Record, Abbrev); Record.clear(); } static void WriteDIImportedEntity(const DIImportedEntity *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<uint64_t> &Record, unsigned Abbrev) { Record.push_back(N->isDistinct()); Record.push_back(N->getTag()); Record.push_back(VE.getMetadataOrNullID(N->getScope())); Record.push_back(VE.getMetadataOrNullID(N->getEntity())); Record.push_back(N->getLine()); Record.push_back(VE.getMetadataOrNullID(N->getRawName())); Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev); Record.clear(); } static void WriteModuleMetadata(const Module *M, const ValueEnumerator &VE, BitstreamWriter &Stream) { const auto &MDs = VE.getMDs(); if (MDs.empty() && M->named_metadata_empty()) return; Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); unsigned MDSAbbrev = 0; if (VE.hasMDString()) { // Abbrev for METADATA_STRING. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRING)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); MDSAbbrev = Stream.EmitAbbrev(Abbv.get()); } // Initialize MDNode abbreviations. 
#define HANDLE_MDNODE_LEAF(CLASS) unsigned CLASS##Abbrev = 0; #include "llvm/IR/Metadata.def" if (VE.hasDILocation()) { // Abbrev for METADATA_LOCATION. // // Assume the column is usually under 128, and always output the inlined-at // location (it's never more expensive than building an array size 1). IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); DILocationAbbrev = Stream.EmitAbbrev(Abbv.get()); } if (VE.hasGenericDINode()) { // Abbrev for METADATA_GENERIC_DEBUG. // // Assume the column is usually under 128, and always output the inlined-at // location (it's never more expensive than building an array size 1). IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_GENERIC_DEBUG)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); GenericDINodeAbbrev = Stream.EmitAbbrev(Abbv.get()); } unsigned NameAbbrev = 0; if (!M->named_metadata_empty()) { // Abbrev for METADATA_NAME. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); NameAbbrev = Stream.EmitAbbrev(Abbv.get()); } SmallVector<uint64_t, 64> Record; for (const Metadata *MD : MDs) { if (const MDNode *N = dyn_cast<MDNode>(MD)) { assert(N->isResolved() && "Expected forward references to be resolved"); switch (N->getMetadataID()) { default: llvm_unreachable("Invalid MDNode subclass"); #define HANDLE_MDNODE_LEAF(CLASS) \ case Metadata::CLASS##Kind: \ Write##CLASS(cast<CLASS>(N), VE, Stream, Record, CLASS##Abbrev); \ continue; #include "llvm/IR/Metadata.def" } } if (const auto *MDC = dyn_cast<ConstantAsMetadata>(MD)) { WriteValueAsMetadata(MDC, VE, Stream, Record); continue; } const MDString *MDS = cast<MDString>(MD); // Code: [strchar x N] Record.append(MDS->bytes_begin(), MDS->bytes_end()); // Emit the finished record. Stream.EmitRecord(bitc::METADATA_STRING, Record, MDSAbbrev); Record.clear(); } // Write named metadata. for (const NamedMDNode &NMD : M->named_metadata()) { // Write name. StringRef Str = NMD.getName(); Record.append(Str.bytes_begin(), Str.bytes_end()); Stream.EmitRecord(bitc::METADATA_NAME, Record, NameAbbrev); Record.clear(); // Write named metadata operands. 
for (const MDNode *N : NMD.operands()) Record.push_back(VE.getMetadataID(N)); Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0); Record.clear(); } Stream.ExitBlock(); } static void WriteFunctionLocalMetadata(const Function &F, const ValueEnumerator &VE, BitstreamWriter &Stream) { bool StartedMetadataBlock = false; SmallVector<uint64_t, 64> Record; const SmallVectorImpl<const LocalAsMetadata *> &MDs = VE.getFunctionLocalMDs(); for (unsigned i = 0, e = MDs.size(); i != e; ++i) { assert(MDs[i] && "Expected valid function-local metadata"); if (!StartedMetadataBlock) { Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); StartedMetadataBlock = true; } WriteValueAsMetadata(MDs[i], VE, Stream, Record); } if (StartedMetadataBlock) Stream.ExitBlock(); } static void WriteMetadataAttachment(const Function &F, const ValueEnumerator &VE, BitstreamWriter &Stream) { Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3); SmallVector<uint64_t, 64> Record; // Write metadata attachments // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]] SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; F.getAllMetadata(MDs); if (!MDs.empty()) { for (const auto &I : MDs) { Record.push_back(I.first); Record.push_back(VE.getMetadataID(I.second)); } Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); Record.clear(); } for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { MDs.clear(); I->getAllMetadataOtherThanDebugLoc(MDs); // If no metadata, ignore instruction. if (MDs.empty()) continue; Record.push_back(VE.getInstructionID(I)); for (unsigned i = 0, e = MDs.size(); i != e; ++i) { Record.push_back(MDs[i].first); Record.push_back(VE.getMetadataID(MDs[i].second)); } Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); Record.clear(); } Stream.ExitBlock(); } static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) { SmallVector<uint64_t, 64> Record; // Write metadata kinds // METADATA_KIND - [n x [id, name]] SmallVector<StringRef, 8> Names; M->getMDKindNames(Names); if (Names.empty()) return; Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); for (unsigned MDKindID = 0, e = Names.size(); MDKindID != e; ++MDKindID) { Record.push_back(MDKindID); StringRef KName = Names[MDKindID]; Record.append(KName.begin(), KName.end()); Stream.EmitRecord(bitc::METADATA_KIND, Record, 0); Record.clear(); } Stream.ExitBlock(); } static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) { if ((int64_t)V >= 0) Vals.push_back(V << 1); else Vals.push_back((-V << 1) | 1); } static void WriteConstants(unsigned FirstVal, unsigned LastVal, const ValueEnumerator &VE, BitstreamWriter &Stream, bool isGlobal) { if (FirstVal == LastVal) return; Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4); unsigned AggregateAbbrev = 0; unsigned String8Abbrev = 0; unsigned CString7Abbrev = 0; unsigned CString6Abbrev = 0; // If this is a constant pool for the module, emit module-specific abbrevs. if (isGlobal) { // Abbrev for CST_CODE_AGGREGATE. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal+1))); AggregateAbbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for CST_CODE_STRING. 
Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); String8Abbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for CST_CODE_CSTRING. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); CString7Abbrev = Stream.EmitAbbrev(Abbv.get()); // Abbrev for CST_CODE_CSTRING. Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); CString6Abbrev = Stream.EmitAbbrev(Abbv.get()); } SmallVector<uint64_t, 64> Record; const ValueEnumerator::ValueList &Vals = VE.getValues(); Type *LastTy = nullptr; for (unsigned i = FirstVal; i != LastVal; ++i) { const Value *V = Vals[i].first; // If we need to switch types, do so now. if (V->getType() != LastTy) { LastTy = V->getType(); Record.push_back(VE.getTypeID(LastTy)); Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record, CONSTANTS_SETTYPE_ABBREV); Record.clear(); } if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) { Record.push_back(unsigned(IA->hasSideEffects()) | unsigned(IA->isAlignStack()) << 1 | unsigned(IA->getDialect()&1) << 2); // Add the asm string. const std::string &AsmStr = IA->getAsmString(); Record.push_back(AsmStr.size()); Record.append(AsmStr.begin(), AsmStr.end()); // Add the constraint string. const std::string &ConstraintStr = IA->getConstraintString(); Record.push_back(ConstraintStr.size()); Record.append(ConstraintStr.begin(), ConstraintStr.end()); Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record); Record.clear(); continue; } const Constant *C = cast<Constant>(V); unsigned Code = -1U; unsigned AbbrevToUse = 0; if (C->isNullValue()) { Code = bitc::CST_CODE_NULL; } else if (isa<UndefValue>(C)) { Code = bitc::CST_CODE_UNDEF; } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) { if (IV->getBitWidth() <= 64) { uint64_t V = IV->getSExtValue(); emitSignedInt64(Record, V); Code = bitc::CST_CODE_INTEGER; AbbrevToUse = CONSTANTS_INTEGER_ABBREV; } else { // Wide integers, > 64 bits in size. // We have an arbitrary precision integer value to write whose // bit width is > 64. However, in canonical unsigned integer // format it is likely that the high bits are going to be zero. // So, we only write the number of active words. unsigned NWords = IV->getValue().getActiveWords(); const uint64_t *RawWords = IV->getValue().getRawData(); for (unsigned i = 0; i != NWords; ++i) { emitSignedInt64(Record, RawWords[i]); } Code = bitc::CST_CODE_WIDE_INTEGER; } } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { Code = bitc::CST_CODE_FLOAT; Type *Ty = CFP->getType(); if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) { Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue()); } else if (Ty->isX86_FP80Ty()) { // api needed to prevent premature destruction // bits are not in the same order as a normal i80 APInt, compensate. 
APInt api = CFP->getValueAPF().bitcastToAPInt(); const uint64_t *p = api.getRawData(); Record.push_back((p[1] << 48) | (p[0] >> 16)); Record.push_back(p[0] & 0xffffLL); } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) { APInt api = CFP->getValueAPF().bitcastToAPInt(); const uint64_t *p = api.getRawData(); Record.push_back(p[0]); Record.push_back(p[1]); } else { assert (0 && "Unknown FP type!"); } } else if (isa<ConstantDataSequential>(C) && cast<ConstantDataSequential>(C)->isString()) { const ConstantDataSequential *Str = cast<ConstantDataSequential>(C); // Emit constant strings specially. unsigned NumElts = Str->getNumElements(); // If this is a null-terminated string, use the denser CSTRING encoding. if (Str->isCString()) { Code = bitc::CST_CODE_CSTRING; --NumElts; // Don't encode the null, which isn't allowed by char6. } else { Code = bitc::CST_CODE_STRING; AbbrevToUse = String8Abbrev; } bool isCStr7 = Code == bitc::CST_CODE_CSTRING; bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING; for (unsigned i = 0; i != NumElts; ++i) { unsigned char V = Str->getElementAsInteger(i); Record.push_back(V); isCStr7 &= (V & 128) == 0; if (isCStrChar6) isCStrChar6 = BitCodeAbbrevOp::isChar6(V); } if (isCStrChar6) AbbrevToUse = CString6Abbrev; else if (isCStr7) AbbrevToUse = CString7Abbrev; } else if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) { Code = bitc::CST_CODE_DATA; Type *EltTy = CDS->getType()->getElementType(); if (isa<IntegerType>(EltTy)) { for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) Record.push_back(CDS->getElementAsInteger(i)); } else if (EltTy->isFloatTy()) { for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { union { float F; uint32_t I; }; F = CDS->getElementAsFloat(i); Record.push_back(I); } } else { assert(EltTy->isDoubleTy() && "Unknown ConstantData element type"); for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { union { double F; uint64_t I; }; F = CDS->getElementAsDouble(i); Record.push_back(I); } } } else if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || isa<ConstantVector>(C)) { Code = bitc::CST_CODE_AGGREGATE; for (const Value *Op : C->operands()) Record.push_back(VE.getValueID(Op)); AbbrevToUse = AggregateAbbrev; } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { switch (CE->getOpcode()) { default: if (Instruction::isCast(CE->getOpcode())) { Code = bitc::CST_CODE_CE_CAST; Record.push_back(GetEncodedCastOpcode(CE->getOpcode())); Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); Record.push_back(VE.getValueID(C->getOperand(0))); AbbrevToUse = CONSTANTS_CE_CAST_Abbrev; } else { assert(CE->getNumOperands() == 2 && "Unknown constant expr!"); Code = bitc::CST_CODE_CE_BINOP; Record.push_back(GetEncodedBinaryOpcode(CE->getOpcode())); Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getValueID(C->getOperand(1))); uint64_t Flags = GetOptimizationFlags(CE); if (Flags != 0) Record.push_back(Flags); } break; case Instruction::GetElementPtr: { Code = bitc::CST_CODE_CE_GEP; const auto *GO = cast<GEPOperator>(C); if (GO->isInBounds()) Code = bitc::CST_CODE_CE_INBOUNDS_GEP; Record.push_back(VE.getTypeID(GO->getSourceElementType())); for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) { Record.push_back(VE.getTypeID(C->getOperand(i)->getType())); Record.push_back(VE.getValueID(C->getOperand(i))); } break; } case Instruction::Select: Code = bitc::CST_CODE_CE_SELECT; Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getValueID(C->getOperand(1))); 
Record.push_back(VE.getValueID(C->getOperand(2))); break; case Instruction::ExtractElement: Code = bitc::CST_CODE_CE_EXTRACTELT; Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getTypeID(C->getOperand(1)->getType())); Record.push_back(VE.getValueID(C->getOperand(1))); break; case Instruction::InsertElement: Code = bitc::CST_CODE_CE_INSERTELT; Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getValueID(C->getOperand(1))); Record.push_back(VE.getTypeID(C->getOperand(2)->getType())); Record.push_back(VE.getValueID(C->getOperand(2))); break; case Instruction::ShuffleVector: // If the return type and argument types are the same, this is a // standard shufflevector instruction. If the types are different, // then the shuffle is widening or truncating the input vectors, and // the argument type must also be encoded. if (C->getType() == C->getOperand(0)->getType()) { Code = bitc::CST_CODE_CE_SHUFFLEVEC; } else { Code = bitc::CST_CODE_CE_SHUFVEC_EX; Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); } Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getValueID(C->getOperand(1))); Record.push_back(VE.getValueID(C->getOperand(2))); break; case Instruction::ICmp: case Instruction::FCmp: Code = bitc::CST_CODE_CE_CMP; Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); Record.push_back(VE.getValueID(C->getOperand(0))); Record.push_back(VE.getValueID(C->getOperand(1))); Record.push_back(CE->getPredicate()); break; } } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) { Code = bitc::CST_CODE_BLOCKADDRESS; Record.push_back(VE.getTypeID(BA->getFunction()->getType())); Record.push_back(VE.getValueID(BA->getFunction())); Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock())); } else { #ifndef NDEBUG C->dump(); #endif llvm_unreachable("Unknown constant!"); } Stream.EmitRecord(Code, Record, AbbrevToUse); Record.clear(); } Stream.ExitBlock(); } static void WriteModuleConstants(const ValueEnumerator &VE, BitstreamWriter &Stream) { const ValueEnumerator::ValueList &Vals = VE.getValues(); // Find the first constant to emit, which is the first non-globalvalue value. // We know globalvalues have been emitted by WriteModuleInfo. for (unsigned i = 0, e = Vals.size(); i != e; ++i) { if (!isa<GlobalValue>(Vals[i].first)) { WriteConstants(i, Vals.size(), VE, Stream, true); return; } } } /// PushValueAndType - The file has to encode both the value and type id for /// many values, because we need to know what type to create for forward /// references. However, most operands are not forward references, so this type /// field is not needed. /// /// This function adds V's value ID to Vals. If the value ID is higher than the /// instruction ID, then it is a forward reference, and it also includes the /// type ID. The value ID that is written is encoded relative to the InstID. static bool PushValueAndType(const Value *V, unsigned InstID, SmallVectorImpl<unsigned> &Vals, ValueEnumerator &VE) { unsigned ValID = VE.getValueID(V); // Make encoding relative to the InstID. Vals.push_back(InstID - ValID); if (ValID >= InstID) { Vals.push_back(VE.getTypeID(V->getType())); return true; } return false; } /// pushValue - Like PushValueAndType, but where the type of the value is /// omitted (perhaps it was already encoded in an earlier operand). 
static void pushValue(const Value *V, unsigned InstID, SmallVectorImpl<unsigned> &Vals, ValueEnumerator &VE) { unsigned ValID = VE.getValueID(V); Vals.push_back(InstID - ValID); } static void pushValueSigned(const Value *V, unsigned InstID, SmallVectorImpl<uint64_t> &Vals, ValueEnumerator &VE) { unsigned ValID = VE.getValueID(V); int64_t diff = ((int32_t)InstID - (int32_t)ValID); emitSignedInt64(Vals, diff); } /// WriteInstruction - Emit an instruction to the specified stream. static void WriteInstruction(const Instruction &I, unsigned InstID, ValueEnumerator &VE, BitstreamWriter &Stream, SmallVectorImpl<unsigned> &Vals) { unsigned Code = 0; unsigned AbbrevToUse = 0; VE.setInstructionID(&I); switch (I.getOpcode()) { default: if (Instruction::isCast(I.getOpcode())) { Code = bitc::FUNC_CODE_INST_CAST; if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) AbbrevToUse = FUNCTION_INST_CAST_ABBREV; Vals.push_back(VE.getTypeID(I.getType())); Vals.push_back(GetEncodedCastOpcode(I.getOpcode())); } else { assert(isa<BinaryOperator>(I) && "Unknown instruction!"); Code = bitc::FUNC_CODE_INST_BINOP; if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) AbbrevToUse = FUNCTION_INST_BINOP_ABBREV; pushValue(I.getOperand(1), InstID, Vals, VE); Vals.push_back(GetEncodedBinaryOpcode(I.getOpcode())); uint64_t Flags = GetOptimizationFlags(&I); if (Flags != 0) { if (AbbrevToUse == FUNCTION_INST_BINOP_ABBREV) AbbrevToUse = FUNCTION_INST_BINOP_FLAGS_ABBREV; Vals.push_back(Flags); } } break; case Instruction::GetElementPtr: { Code = bitc::FUNC_CODE_INST_GEP; AbbrevToUse = FUNCTION_INST_GEP_ABBREV; auto &GEPInst = cast<GetElementPtrInst>(I); Vals.push_back(GEPInst.isInBounds()); Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType())); for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) PushValueAndType(I.getOperand(i), InstID, Vals, VE); break; } case Instruction::ExtractValue: { Code = bitc::FUNC_CODE_INST_EXTRACTVAL; PushValueAndType(I.getOperand(0), InstID, Vals, VE); const ExtractValueInst *EVI = cast<ExtractValueInst>(&I); Vals.append(EVI->idx_begin(), EVI->idx_end()); break; } case Instruction::InsertValue: { Code = bitc::FUNC_CODE_INST_INSERTVAL; PushValueAndType(I.getOperand(0), InstID, Vals, VE); PushValueAndType(I.getOperand(1), InstID, Vals, VE); const InsertValueInst *IVI = cast<InsertValueInst>(&I); Vals.append(IVI->idx_begin(), IVI->idx_end()); break; } case Instruction::Select: Code = bitc::FUNC_CODE_INST_VSELECT; PushValueAndType(I.getOperand(1), InstID, Vals, VE); pushValue(I.getOperand(2), InstID, Vals, VE); PushValueAndType(I.getOperand(0), InstID, Vals, VE); break; case Instruction::ExtractElement: Code = bitc::FUNC_CODE_INST_EXTRACTELT; PushValueAndType(I.getOperand(0), InstID, Vals, VE); PushValueAndType(I.getOperand(1), InstID, Vals, VE); break; case Instruction::InsertElement: Code = bitc::FUNC_CODE_INST_INSERTELT; PushValueAndType(I.getOperand(0), InstID, Vals, VE); pushValue(I.getOperand(1), InstID, Vals, VE); PushValueAndType(I.getOperand(2), InstID, Vals, VE); break; case Instruction::ShuffleVector: Code = bitc::FUNC_CODE_INST_SHUFFLEVEC; PushValueAndType(I.getOperand(0), InstID, Vals, VE); pushValue(I.getOperand(1), InstID, Vals, VE); pushValue(I.getOperand(2), InstID, Vals, VE); break; case Instruction::ICmp: case Instruction::FCmp: { // compare returning Int1Ty or vector of Int1Ty Code = bitc::FUNC_CODE_INST_CMP2; PushValueAndType(I.getOperand(0), InstID, Vals, VE); pushValue(I.getOperand(1), InstID, Vals, VE); Vals.push_back(cast<CmpInst>(I).getPredicate()); 
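    // For FCmp, the fast-math flags follow; GetOptimizationFlags returns 0 for
    // plain integer compares, so ICmp records normally carry nothing extra here.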
uint64_t Flags = GetOptimizationFlags(&I); if (Flags != 0) Vals.push_back(Flags); break; } case Instruction::Ret: { Code = bitc::FUNC_CODE_INST_RET; unsigned NumOperands = I.getNumOperands(); if (NumOperands == 0) AbbrevToUse = FUNCTION_INST_RET_VOID_ABBREV; else if (NumOperands == 1) { if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) AbbrevToUse = FUNCTION_INST_RET_VAL_ABBREV; } else { for (unsigned i = 0, e = NumOperands; i != e; ++i) PushValueAndType(I.getOperand(i), InstID, Vals, VE); } } break; case Instruction::Br: { Code = bitc::FUNC_CODE_INST_BR; const BranchInst &II = cast<BranchInst>(I); Vals.push_back(VE.getValueID(II.getSuccessor(0))); if (II.isConditional()) { Vals.push_back(VE.getValueID(II.getSuccessor(1))); pushValue(II.getCondition(), InstID, Vals, VE); } } break; case Instruction::Switch: { Code = bitc::FUNC_CODE_INST_SWITCH; const SwitchInst &SI = cast<SwitchInst>(I); Vals.push_back(VE.getTypeID(SI.getCondition()->getType())); pushValue(SI.getCondition(), InstID, Vals, VE); Vals.push_back(VE.getValueID(SI.getDefaultDest())); for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) { Vals.push_back(VE.getValueID(i.getCaseValue())); Vals.push_back(VE.getValueID(i.getCaseSuccessor())); } } break; case Instruction::IndirectBr: Code = bitc::FUNC_CODE_INST_INDIRECTBR; Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // Encode the address operand as relative, but not the basic blocks. pushValue(I.getOperand(0), InstID, Vals, VE); for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) Vals.push_back(VE.getValueID(I.getOperand(i))); break; case Instruction::Invoke: { const InvokeInst *II = cast<InvokeInst>(&I); const Value *Callee = II->getCalledValue(); FunctionType *FTy = II->getFunctionType(); Code = bitc::FUNC_CODE_INST_INVOKE; Vals.push_back(VE.getAttributeID(II->getAttributes())); Vals.push_back(II->getCallingConv() | 1 << 13); Vals.push_back(VE.getValueID(II->getNormalDest())); Vals.push_back(VE.getValueID(II->getUnwindDest())); Vals.push_back(VE.getTypeID(FTy)); PushValueAndType(Callee, InstID, Vals, VE); // Emit value #'s for the fixed parameters. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) pushValue(I.getOperand(i), InstID, Vals, VE); // fixed param. // Emit type/value pairs for varargs params. if (FTy->isVarArg()) { for (unsigned i = FTy->getNumParams(), e = I.getNumOperands()-3; i != e; ++i) PushValueAndType(I.getOperand(i), InstID, Vals, VE); // vararg } break; } case Instruction::Resume: Code = bitc::FUNC_CODE_INST_RESUME; PushValueAndType(I.getOperand(0), InstID, Vals, VE); break; case Instruction::Unreachable: Code = bitc::FUNC_CODE_INST_UNREACHABLE; AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV; break; case Instruction::PHI: { const PHINode &PN = cast<PHINode>(I); Code = bitc::FUNC_CODE_INST_PHI; // With the newer instruction encoding, forward references could give // negative valued IDs. This is most common for PHIs, so we use // signed VBRs. SmallVector<uint64_t, 128> Vals64; Vals64.push_back(VE.getTypeID(PN.getType())); for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { pushValueSigned(PN.getIncomingValue(i), InstID, Vals64, VE); Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i))); } // Emit a Vals64 vector and exit. 
Stream.EmitRecord(Code, Vals64, AbbrevToUse); Vals64.clear(); return; } case Instruction::LandingPad: { const LandingPadInst &LP = cast<LandingPadInst>(I); Code = bitc::FUNC_CODE_INST_LANDINGPAD; Vals.push_back(VE.getTypeID(LP.getType())); Vals.push_back(LP.isCleanup()); Vals.push_back(LP.getNumClauses()); for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) { if (LP.isCatch(I)) Vals.push_back(LandingPadInst::Catch); else Vals.push_back(LandingPadInst::Filter); PushValueAndType(LP.getClause(I), InstID, Vals, VE); } break; } case Instruction::Alloca: { Code = bitc::FUNC_CODE_INST_ALLOCA; const AllocaInst &AI = cast<AllocaInst>(I); Vals.push_back(VE.getTypeID(AI.getAllocatedType())); Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); Vals.push_back(VE.getValueID(I.getOperand(0))); // size. unsigned AlignRecord = Log2_32(AI.getAlignment()) + 1; assert(Log2_32(Value::MaximumAlignment) + 1 < 1 << 5 && "not enough bits for maximum alignment"); assert(AlignRecord < 1 << 5 && "alignment greater than 1 << 64"); AlignRecord |= AI.isUsedWithInAlloca() << 5; AlignRecord |= 1 << 6; Vals.push_back(AlignRecord); break; } case Instruction::Load: if (cast<LoadInst>(I).isAtomic()) { Code = bitc::FUNC_CODE_INST_LOADATOMIC; PushValueAndType(I.getOperand(0), InstID, Vals, VE); } else { Code = bitc::FUNC_CODE_INST_LOAD; if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr AbbrevToUse = FUNCTION_INST_LOAD_ABBREV; } Vals.push_back(VE.getTypeID(I.getType())); Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1); Vals.push_back(cast<LoadInst>(I).isVolatile()); if (cast<LoadInst>(I).isAtomic()) { Vals.push_back(GetEncodedOrdering(cast<LoadInst>(I).getOrdering())); Vals.push_back(GetEncodedSynchScope(cast<LoadInst>(I).getSynchScope())); } break; case Instruction::Store: if (cast<StoreInst>(I).isAtomic()) Code = bitc::FUNC_CODE_INST_STOREATOMIC; else Code = bitc::FUNC_CODE_INST_STORE; PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr PushValueAndType(I.getOperand(0), InstID, Vals, VE); // valty + val Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1); Vals.push_back(cast<StoreInst>(I).isVolatile()); if (cast<StoreInst>(I).isAtomic()) { Vals.push_back(GetEncodedOrdering(cast<StoreInst>(I).getOrdering())); Vals.push_back(GetEncodedSynchScope(cast<StoreInst>(I).getSynchScope())); } break; case Instruction::AtomicCmpXchg: Code = bitc::FUNC_CODE_INST_CMPXCHG; PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr PushValueAndType(I.getOperand(1), InstID, Vals, VE); // cmp. pushValue(I.getOperand(2), InstID, Vals, VE); // newval. Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile()); Vals.push_back(GetEncodedOrdering( cast<AtomicCmpXchgInst>(I).getSuccessOrdering())); Vals.push_back(GetEncodedSynchScope( cast<AtomicCmpXchgInst>(I).getSynchScope())); Vals.push_back(GetEncodedOrdering( cast<AtomicCmpXchgInst>(I).getFailureOrdering())); Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak()); break; case Instruction::AtomicRMW: Code = bitc::FUNC_CODE_INST_ATOMICRMW; PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr pushValue(I.getOperand(1), InstID, Vals, VE); // val. 
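    // The remaining ATOMICRMW fields follow: the RMW operation, the volatile
    // bit, the memory ordering, and the synchronization scope.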
Vals.push_back(GetEncodedRMWOperation( cast<AtomicRMWInst>(I).getOperation())); Vals.push_back(cast<AtomicRMWInst>(I).isVolatile()); Vals.push_back(GetEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering())); Vals.push_back(GetEncodedSynchScope( cast<AtomicRMWInst>(I).getSynchScope())); break; case Instruction::Fence: Code = bitc::FUNC_CODE_INST_FENCE; Vals.push_back(GetEncodedOrdering(cast<FenceInst>(I).getOrdering())); Vals.push_back(GetEncodedSynchScope(cast<FenceInst>(I).getSynchScope())); break; case Instruction::Call: { const CallInst &CI = cast<CallInst>(I); FunctionType *FTy = CI.getFunctionType(); Code = bitc::FUNC_CODE_INST_CALL; Vals.push_back(VE.getAttributeID(CI.getAttributes())); Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()) | unsigned(CI.isMustTailCall()) << 14 | 1 << 15); Vals.push_back(VE.getTypeID(FTy)); PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee // Emit value #'s for the fixed parameters. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { // Check for labels (can happen with asm labels). if (FTy->getParamType(i)->isLabelTy()) Vals.push_back(VE.getValueID(CI.getArgOperand(i))); else pushValue(CI.getArgOperand(i), InstID, Vals, VE); // fixed param. } // Emit type/value pairs for varargs params. if (FTy->isVarArg()) { for (unsigned i = FTy->getNumParams(), e = CI.getNumArgOperands(); i != e; ++i) PushValueAndType(CI.getArgOperand(i), InstID, Vals, VE); // varargs } break; } case Instruction::VAArg: Code = bitc::FUNC_CODE_INST_VAARG; Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty pushValue(I.getOperand(0), InstID, Vals, VE); // valist. Vals.push_back(VE.getTypeID(I.getType())); // restype. break; } Stream.EmitRecord(Code, Vals, AbbrevToUse); Vals.clear(); } // Emit names for globals/functions etc. static void WriteValueSymbolTable(const ValueSymbolTable &VST, const ValueEnumerator &VE, BitstreamWriter &Stream) { if (VST.empty()) return; Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); // FIXME: Set up the abbrev, we know how many values there are! // FIXME: We know if the type names can use 7-bit ascii. SmallVector<unsigned, 64> NameVals; // HLSL Change - Begin // Read the named values from a sorted list instead of the original list // to ensure the binary is the same no matter what values ever existed. SmallVector<const ValueName *, 16> SortedTable; for (ValueSymbolTable::const_iterator SI = VST.begin(), SE = VST.end(); SI != SE; ++SI) { SortedTable.push_back(&(*SI)); } // The keys are unique, so there shouldn't be stability issues std::sort(SortedTable.begin(), SortedTable.end(), [](const ValueName *A, const ValueName *B) { return (*A).first() < (*B).first(); }); for (const ValueName *SI : SortedTable) { auto &Name = *SI; // HLSL Change - End #if 0 // HLSL Change for (ValueSymbolTable::const_iterator SI = VST.begin(), SE = VST.end(); SI != SE; ++SI) { const ValueName &Name = *SI; #endif // HLSL Change // Figure out the encoding to use for the name. bool is7Bit = true; bool isChar6 = true; for (const char *C = Name.getKeyData(), *E = C+Name.getKeyLength(); C != E; ++C) { if (isChar6) isChar6 = BitCodeAbbrevOp::isChar6(*C); if ((unsigned char)*C & 128) { is7Bit = false; break; // don't bother scanning the rest. 
} } unsigned AbbrevToUse = VST_ENTRY_8_ABBREV; // VST_ENTRY: [valueid, namechar x N] // VST_BBENTRY: [bbid, namechar x N] unsigned Code; if (isa<BasicBlock>(SI->getValue())) { Code = bitc::VST_CODE_BBENTRY; if (isChar6) AbbrevToUse = VST_BBENTRY_6_ABBREV; } else { Code = bitc::VST_CODE_ENTRY; if (isChar6) AbbrevToUse = VST_ENTRY_6_ABBREV; else if (is7Bit) AbbrevToUse = VST_ENTRY_7_ABBREV; } NameVals.push_back(VE.getValueID(SI->getValue())); for (const char *P = Name.getKeyData(), *E = Name.getKeyData()+Name.getKeyLength(); P != E; ++P) NameVals.push_back((unsigned char)*P); // Emit the finished record. Stream.EmitRecord(Code, NameVals, AbbrevToUse); NameVals.clear(); } Stream.ExitBlock(); } static void WriteUseList(ValueEnumerator &VE, UseListOrder &&Order, BitstreamWriter &Stream) { assert(Order.Shuffle.size() >= 2 && "Shuffle too small"); unsigned Code; if (isa<BasicBlock>(Order.V)) Code = bitc::USELIST_CODE_BB; else Code = bitc::USELIST_CODE_DEFAULT; SmallVector<uint64_t, 64> Record(Order.Shuffle.begin(), Order.Shuffle.end()); Record.push_back(VE.getValueID(Order.V)); Stream.EmitRecord(Code, Record); } static void WriteUseListBlock(const Function *F, ValueEnumerator &VE, BitstreamWriter &Stream) { assert(VE.shouldPreserveUseListOrder() && "Expected to be preserving use-list order"); auto hasMore = [&]() { return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F; }; if (!hasMore()) // Nothing to do. return; Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3); while (hasMore()) { WriteUseList(VE, std::move(VE.UseListOrders.back()), Stream); VE.UseListOrders.pop_back(); } Stream.ExitBlock(); } /// WriteFunction - Emit a function body to the module stream. static void WriteFunction(const Function &F, ValueEnumerator &VE, BitstreamWriter &Stream) { Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4); VE.incorporateFunction(F); SmallVector<unsigned, 64> Vals; // Emit the number of basic blocks, so the reader can create them ahead of // time. Vals.push_back(VE.getBasicBlocks().size()); Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals); Vals.clear(); // If there are function-local constants, emit them now. unsigned CstStart, CstEnd; VE.getFunctionConstantRange(CstStart, CstEnd); WriteConstants(CstStart, CstEnd, VE, Stream, false); // If there is function-local metadata, emit it now. WriteFunctionLocalMetadata(F, VE, Stream); // Keep a running idea of what the instruction ID is. unsigned InstID = CstEnd; bool NeedsMetadataAttachment = F.hasMetadata(); DILocation *LastDL = nullptr; // Finally, emit all the instructions, in order. for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { WriteInstruction(*I, InstID, VE, Stream, Vals); if (!I->getType()->isVoidTy()) ++InstID; // If the instruction has metadata, write a metadata attachment later. NeedsMetadataAttachment |= I->hasMetadataOtherThanDebugLoc(); // If the instruction has a debug location, emit it. DILocation *DL = I->getDebugLoc(); if (!DL) continue; if (DL == LastDL) { // Just repeat the same debug loc as last time. Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC_AGAIN, Vals); continue; } Vals.push_back(DL->getLine()); Vals.push_back(DL->getColumn()); Vals.push_back(VE.getMetadataOrNullID(DL->getScope())); Vals.push_back(VE.getMetadataOrNullID(DL->getInlinedAt())); Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals); Vals.clear(); LastDL = DL; } // Emit names for all the instructions etc. 
WriteValueSymbolTable(F.getValueSymbolTable(), VE, Stream); if (NeedsMetadataAttachment) WriteMetadataAttachment(F, VE, Stream); if (VE.shouldPreserveUseListOrder()) WriteUseListBlock(&F, VE, Stream); VE.purgeFunction(); Stream.ExitBlock(); } // Emit blockinfo, which defines the standard abbreviations etc. static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) { // We only want to emit block info records for blocks that have multiple // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. // Other blocks can define their abbrevs inline. Stream.EnterBlockInfoBlock(2); { // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv.get()) != VST_ENTRY_8_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // 7-bit fixed width VST_ENTRY strings. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv.get()) != VST_ENTRY_7_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // 6-bit char6 VST_ENTRY strings. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv.get()) != VST_ENTRY_6_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // 6-bit char6 VST_BBENTRY strings. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv.get()) != VST_BBENTRY_6_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // SETTYPE abbrev for CONSTANTS_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, VE.computeBitsRequiredForTypeIndicies())); if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv.get()) != CONSTANTS_SETTYPE_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INTEGER abbrev for CONSTANTS_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv.get()) != CONSTANTS_INTEGER_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // CE_CAST abbrev for CONSTANTS_BLOCK. 
IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid VE.computeBitsRequiredForTypeIndicies())); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv.get()) != CONSTANTS_CE_CAST_Abbrev) llvm_unreachable("Unexpected abbrev ordering!"); } { // NULL abbrev for CONSTANTS_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL)); if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv.get()) != CONSTANTS_NULL_Abbrev) llvm_unreachable("Unexpected abbrev ordering!"); } // FIXME: This should only use space for first class types! { // INST_LOAD abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty VE.computeBitsRequiredForTypeIndicies())); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_LOAD_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_BINOP abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_BINOP_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); // flags if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_BINOP_FLAGS_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_CAST abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty VE.computeBitsRequiredForTypeIndicies())); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_CAST_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_RET abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_RET_VOID_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_RET abbrev for FUNCTION_BLOCK. 
IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_RET_VAL_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK. IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE)); if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_UNREACHABLE_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } { IntrusiveRefCntPtr<BitCodeAbbrev> Abbv = new BitCodeAbbrev(); Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty Log2_32_Ceil(VE.getTypes().size() + 1))); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv.get()) != FUNCTION_INST_GEP_ABBREV) llvm_unreachable("Unexpected abbrev ordering!"); } Stream.ExitBlock(); } /// WriteModule - Emit the specified module to the bitstream. static void WriteModule(const Module *M, BitstreamWriter &Stream, bool ShouldPreserveUseListOrder) { Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); SmallVector<unsigned, 1> Vals; unsigned CurVersion = 1; Vals.push_back(CurVersion); Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals); // Analyze the module, enumerating globals, functions, etc. ValueEnumerator VE(*M, ShouldPreserveUseListOrder); // Emit blockinfo, which defines the standard abbreviations etc. WriteBlockInfo(VE, Stream); // Emit information about attribute groups. WriteAttributeGroupTable(VE, Stream); // Emit information about parameter attributes. WriteAttributeTable(VE, Stream); // Emit information describing all of the types in the module. WriteTypeTable(VE, Stream); writeComdats(VE, Stream); // Emit top-level description of module, including target triple, inline asm, // descriptors for global variables, and function prototype info. WriteModuleInfo(M, VE, Stream); // Emit constants. WriteModuleConstants(VE, Stream); // Emit metadata. WriteModuleMetadata(M, VE, Stream); // Emit metadata. WriteModuleMetadataStore(M, Stream); // Emit names for globals/functions etc. WriteValueSymbolTable(M->getValueSymbolTable(), VE, Stream); // Emit module-level use-lists. if (VE.shouldPreserveUseListOrder()) WriteUseListBlock(nullptr, VE, Stream); // Emit function bodies. for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) if (!F->isDeclaration()) WriteFunction(*F, VE, Stream); Stream.ExitBlock(); } /// EmitDarwinBCHeader - If generating a bc file on darwin, we have to emit a /// header and trailer to make it compatible with the system archiver. To do /// this we emit the following header, and then emit a trailer that pads the /// file out to be a multiple of 16 bytes. /// /// struct bc_header { /// uint32_t Magic; // 0x0B17C0DE /// uint32_t Version; // Version, currently always 0. /// uint32_t BitcodeOffset; // Offset to traditional bitcode file. /// uint32_t BitcodeSize; // Size of traditional bitcode file. /// uint32_t CPUType; // CPU specifier. /// ... potentially more later ... /// }; enum { DarwinBCSizeFieldOffset = 3*4, // Offset to bitcode_size. 
DarwinBCHeaderSize = 5*4 }; static void WriteInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer, uint32_t &Position) { support::endian::write32le(&Buffer[Position], Value); Position += 4; } static void EmitDarwinBCHeaderAndTrailer(SmallVectorImpl<char> &Buffer, const Triple &TT) { unsigned CPUType = ~0U; // Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*, // armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*. The CPUType is a magic // number from /usr/include/mach/machine.h. It is ok to reproduce the // specific constants here because they are implicitly part of the Darwin ABI. enum { DARWIN_CPU_ARCH_ABI64 = 0x01000000, DARWIN_CPU_TYPE_X86 = 7, DARWIN_CPU_TYPE_ARM = 12, DARWIN_CPU_TYPE_POWERPC = 18 }; Triple::ArchType Arch = TT.getArch(); if (Arch == Triple::x86_64) CPUType = DARWIN_CPU_TYPE_X86 | DARWIN_CPU_ARCH_ABI64; else if (Arch == Triple::x86) CPUType = DARWIN_CPU_TYPE_X86; else if (Arch == Triple::ppc) CPUType = DARWIN_CPU_TYPE_POWERPC; else if (Arch == Triple::ppc64) CPUType = DARWIN_CPU_TYPE_POWERPC | DARWIN_CPU_ARCH_ABI64; else if (Arch == Triple::arm || Arch == Triple::thumb) CPUType = DARWIN_CPU_TYPE_ARM; // Traditional Bitcode starts after header. assert(Buffer.size() >= DarwinBCHeaderSize && "Expected header size to be reserved"); unsigned BCOffset = DarwinBCHeaderSize; unsigned BCSize = Buffer.size()-DarwinBCHeaderSize; // Write the magic and version. unsigned Position = 0; WriteInt32ToBuffer(0x0B17C0DE , Buffer, Position); WriteInt32ToBuffer(0 , Buffer, Position); // Version. WriteInt32ToBuffer(BCOffset , Buffer, Position); WriteInt32ToBuffer(BCSize , Buffer, Position); WriteInt32ToBuffer(CPUType , Buffer, Position); // If the file is not a multiple of 16 bytes, insert dummy padding. while (Buffer.size() & 15) Buffer.push_back(0); } /// WriteBitcodeToFile - Write the specified module to the specified output /// stream. void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out, bool ShouldPreserveUseListOrder) { SmallVector<char, 0> Buffer; Buffer.reserve(256*1024); // If this is darwin or another generic macho target, reserve space for the // header. Triple TT(M->getTargetTriple()); if (TT.isOSDarwin()) Buffer.insert(Buffer.begin(), DarwinBCHeaderSize, 0); // Emit the module into the buffer. { BitstreamWriter Stream(Buffer); // Emit the file header. Stream.Emit((unsigned)'B', 8); Stream.Emit((unsigned)'C', 8); Stream.Emit(0x0, 4); Stream.Emit(0xC, 4); Stream.Emit(0xE, 4); Stream.Emit(0xD, 4); // Emit the module. WriteModule(M, Stream, ShouldPreserveUseListOrder); } if (TT.isOSDarwin()) EmitDarwinBCHeaderAndTrailer(Buffer, TT); // Write the generated bitstream to "Out". Out.write((char*)&Buffer.front(), Buffer.size()); }
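// A minimal usage sketch for the entry point above: lowering a Module to a .bc
// file on disk. The helper name and path handling are illustrative only;
// WriteBitcodeToFile, raw_fd_ostream, and sys::fs::F_None are the standard
// LLVM facilities used as-is.
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

bool emitModuleToDisk(const llvm::Module *M, llvm::StringRef Path) {
  std::error_code EC;
  llvm::raw_fd_ostream Out(Path, EC, llvm::sys::fs::F_None);
  if (EC)
    return false; // could not open the output file
  // Preserving use-list order costs extra bitcode; enable it only when a
  // bit-exact round trip through the reader matters.
  llvm::WriteBitcodeToFile(M, Out, /*ShouldPreserveUseListOrder=*/false);
  return true;
}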
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/CMakeLists.txt
add_llvm_library(LLVMBitWriter BitWriter.cpp BitcodeWriter.cpp BitcodeWriterPass.cpp ValueEnumerator.cpp DEPENDS intrinsics_gen )
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/ValueEnumerator.cpp
//===-- ValueEnumerator.cpp - Number values and types for bitcode writer --===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the ValueEnumerator class. // //===----------------------------------------------------------------------===// #include "ValueEnumerator.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/UseListOrder.h" #include "llvm/IR/ValueSymbolTable.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> using namespace llvm; namespace { struct OrderMap { DenseMap<const Value *, std::pair<unsigned, bool>> IDs; unsigned LastGlobalConstantID; unsigned LastGlobalValueID; OrderMap() : LastGlobalConstantID(0), LastGlobalValueID(0) {} bool isGlobalConstant(unsigned ID) const { return ID <= LastGlobalConstantID; } bool isGlobalValue(unsigned ID) const { return ID <= LastGlobalValueID && !isGlobalConstant(ID); } unsigned size() const { return IDs.size(); } std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; } std::pair<unsigned, bool> lookup(const Value *V) const { return IDs.lookup(V); } void index(const Value *V) { // Explicitly sequence get-size and insert-value operations to avoid UB. unsigned ID = IDs.size() + 1; IDs[V].first = ID; } }; } static void orderValue(const Value *V, OrderMap &OM) { if (OM.lookup(V).first) return; if (const Constant *C = dyn_cast<Constant>(V)) if (C->getNumOperands() && !isa<GlobalValue>(C)) for (const Value *Op : C->operands()) if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op)) orderValue(Op, OM); // Note: we cannot cache this lookup above, since inserting into the map // changes the map's size, and thus affects the other IDs. OM.index(V); } static OrderMap orderModule(const Module &M) { // This needs to match the order used by ValueEnumerator::ValueEnumerator() // and ValueEnumerator::incorporateFunction(). OrderMap OM; // In the reader, initializers of GlobalValues are set *after* all the // globals have been read. Rather than awkwardly modeling this behaviour // directly in predictValueUseListOrderImpl(), just assign IDs to // initializers of GlobalValues before GlobalValues themselves to model this // implicitly. for (const GlobalVariable &G : M.globals()) if (G.hasInitializer()) if (!isa<GlobalValue>(G.getInitializer())) orderValue(G.getInitializer(), OM); for (const GlobalAlias &A : M.aliases()) if (!isa<GlobalValue>(A.getAliasee())) orderValue(A.getAliasee(), OM); for (const Function &F : M) { if (F.hasPrefixData()) if (!isa<GlobalValue>(F.getPrefixData())) orderValue(F.getPrefixData(), OM); if (F.hasPrologueData()) if (!isa<GlobalValue>(F.getPrologueData())) orderValue(F.getPrologueData(), OM); if (F.hasPersonalityFn()) if (!isa<GlobalValue>(F.getPersonalityFn())) orderValue(F.getPersonalityFn(), OM); } OM.LastGlobalConstantID = OM.size(); // Initializers of GlobalValues are processed in // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather // than ValueEnumerator, and match the code in predictValueUseListOrderImpl() // by giving IDs in reverse order. 
// // Since GlobalValues never reference each other directly (just through // initializers), their relative IDs only matter for determining order of // uses in their initializers. for (const Function &F : M) orderValue(&F, OM); for (const GlobalAlias &A : M.aliases()) orderValue(&A, OM); for (const GlobalVariable &G : M.globals()) orderValue(&G, OM); OM.LastGlobalValueID = OM.size(); for (const Function &F : M) { if (F.isDeclaration()) continue; // Here we need to match the union of ValueEnumerator::incorporateFunction() // and WriteFunction(). Basic blocks are implicitly declared before // anything else (by declaring their size). for (const BasicBlock &BB : F) orderValue(&BB, OM); for (const Argument &A : F.args()) orderValue(&A, OM); for (const BasicBlock &BB : F) for (const Instruction &I : BB) for (const Value *Op : I.operands()) if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) || isa<InlineAsm>(*Op)) orderValue(Op, OM); for (const BasicBlock &BB : F) for (const Instruction &I : BB) orderValue(&I, OM); } return OM; } static void predictValueUseListOrderImpl(const Value *V, const Function *F, unsigned ID, const OrderMap &OM, UseListOrderStack &Stack) { // Predict use-list order for this one. typedef std::pair<const Use *, unsigned> Entry; SmallVector<Entry, 64> List; for (const Use &U : V->uses()) // Check if this user will be serialized. if (OM.lookup(U.getUser()).first) List.push_back(std::make_pair(&U, List.size())); if (List.size() < 2) // We may have lost some users. return; bool IsGlobalValue = OM.isGlobalValue(ID); std::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) { const Use *LU = L.first; const Use *RU = R.first; if (LU == RU) return false; auto LID = OM.lookup(LU->getUser()).first; auto RID = OM.lookup(RU->getUser()).first; // Global values are processed in reverse order. // // Moreover, initializers of GlobalValues are set *after* all the globals // have been read (despite having earlier IDs). Rather than awkwardly // modeling this behaviour here, orderModule() has assigned IDs to // initializers of GlobalValues before GlobalValues themselves. if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) return LID < RID; // If ID is 4, then expect: 7 6 5 1 2 3. if (LID < RID) { if (RID <= ID) if (!IsGlobalValue) // GlobalValue uses don't get reversed. return true; return false; } if (RID < LID) { if (LID <= ID) if (!IsGlobalValue) // GlobalValue uses don't get reversed. return false; return true; } // LID and RID are equal, so we have different operands of the same user. // Assume operands are added in order for all instructions. if (LID <= ID) if (!IsGlobalValue) // GlobalValue uses don't get reversed. return LU->getOperandNo() < RU->getOperandNo(); return LU->getOperandNo() > RU->getOperandNo(); }); if (std::is_sorted( List.begin(), List.end(), [](const Entry &L, const Entry &R) { return L.second < R.second; })) // Order is already correct. return; // Store the shuffle. Stack.emplace_back(V, F, List.size()); assert(List.size() == Stack.back().Shuffle.size() && "Wrong size"); for (size_t I = 0, E = List.size(); I != E; ++I) Stack.back().Shuffle[I] = List[I].second; } static void predictValueUseListOrder(const Value *V, const Function *F, OrderMap &OM, UseListOrderStack &Stack) { auto &IDPair = OM[V]; assert(IDPair.first && "Unmapped value"); if (IDPair.second) // Already predicted. return; // Do the actual prediction. 
IDPair.second = true; if (!V->use_empty() && std::next(V->use_begin()) != V->use_end()) predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack); // Recursive descent into constants. if (const Constant *C = dyn_cast<Constant>(V)) if (C->getNumOperands()) // Visit GlobalValues. for (const Value *Op : C->operands()) if (isa<Constant>(Op)) // Visit GlobalValues. predictValueUseListOrder(Op, F, OM, Stack); } static UseListOrderStack predictUseListOrder(const Module &M) { OrderMap OM = orderModule(M); // Use-list orders need to be serialized after all the users have been added // to a value, or else the shuffles will be incomplete. Store them per // function in a stack. // // Aside from function order, the order of values doesn't matter much here. UseListOrderStack Stack; // We want to visit the functions backward now so we can list function-local // constants in the last Function they're used in. Module-level constants // have already been visited above. for (auto I = M.rbegin(), E = M.rend(); I != E; ++I) { const Function &F = *I; if (F.isDeclaration()) continue; for (const BasicBlock &BB : F) predictValueUseListOrder(&BB, &F, OM, Stack); for (const Argument &A : F.args()) predictValueUseListOrder(&A, &F, OM, Stack); for (const BasicBlock &BB : F) for (const Instruction &I : BB) for (const Value *Op : I.operands()) if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues. predictValueUseListOrder(Op, &F, OM, Stack); for (const BasicBlock &BB : F) for (const Instruction &I : BB) predictValueUseListOrder(&I, &F, OM, Stack); } // Visit globals last, since the module-level use-list block will be seen // before the function bodies are processed. for (const GlobalVariable &G : M.globals()) predictValueUseListOrder(&G, nullptr, OM, Stack); for (const Function &F : M) predictValueUseListOrder(&F, nullptr, OM, Stack); for (const GlobalAlias &A : M.aliases()) predictValueUseListOrder(&A, nullptr, OM, Stack); for (const GlobalVariable &G : M.globals()) if (G.hasInitializer()) predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack); for (const GlobalAlias &A : M.aliases()) predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack); for (const Function &F : M) { if (F.hasPrefixData()) predictValueUseListOrder(F.getPrefixData(), nullptr, OM, Stack); if (F.hasPrologueData()) predictValueUseListOrder(F.getPrologueData(), nullptr, OM, Stack); if (F.hasPersonalityFn()) predictValueUseListOrder(F.getPersonalityFn(), nullptr, OM, Stack); } return Stack; } static bool isIntOrIntVectorValue(const std::pair<const Value*, unsigned> &V) { return V.first->getType()->isIntOrIntVectorTy(); } ValueEnumerator::ValueEnumerator(const Module &M, bool ShouldPreserveUseListOrder) : HasMDString(false), HasDILocation(false), HasGenericDINode(false), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) { if (ShouldPreserveUseListOrder) UseListOrders = predictUseListOrder(M); // Enumerate the global variables. for (const GlobalVariable &GV : M.globals()) EnumerateValue(&GV); // Enumerate the functions. for (const Function & F : M) { EnumerateValue(&F); EnumerateAttributes(F.getAttributes()); } // Enumerate the aliases. for (const GlobalAlias &GA : M.aliases()) EnumerateValue(&GA); // Remember what is the cutoff between globalvalue's and other constants. unsigned FirstConstant = Values.size(); // Enumerate the global variable initializers. for (const GlobalVariable &GV : M.globals()) if (GV.hasInitializer()) EnumerateValue(GV.getInitializer()); // Enumerate the aliasees. 
for (const GlobalAlias &GA : M.aliases()) EnumerateValue(GA.getAliasee()); // Enumerate the prefix data constants. for (const Function &F : M) if (F.hasPrefixData()) EnumerateValue(F.getPrefixData()); // Enumerate the prologue data constants. for (const Function &F : M) if (F.hasPrologueData()) EnumerateValue(F.getPrologueData()); // Enumerate the personality functions. for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) if (I->hasPersonalityFn()) EnumerateValue(I->getPersonalityFn()); // Enumerate the metadata type. // // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode // only encodes the metadata type when it's used as a value. EnumerateType(Type::getMetadataTy(M.getContext())); // Insert constants and metadata that are named at module level into the slot // pool so that the module symbol table can refer to them... EnumerateValueSymbolTable(M.getValueSymbolTable()); EnumerateNamedMetadata(M); SmallVector<std::pair<unsigned, MDNode *>, 8> MDs; // Enumerate types used by function bodies and argument lists. for (const Function &F : M) { for (const Argument &A : F.args()) EnumerateType(A.getType()); // Enumerate metadata attached to this function. F.getAllMetadata(MDs); for (const auto &I : MDs) EnumerateMetadata(I.second); for (const BasicBlock &BB : F) for (const Instruction &I : BB) { for (const Use &Op : I.operands()) { auto *MD = dyn_cast<MetadataAsValue>(&Op); if (!MD) { EnumerateOperandType(Op); continue; } // Local metadata is enumerated during function-incorporation. if (isa<LocalAsMetadata>(MD->getMetadata())) continue; EnumerateMetadata(MD->getMetadata()); } EnumerateType(I.getType()); if (const CallInst *CI = dyn_cast<CallInst>(&I)) EnumerateAttributes(CI->getAttributes()); else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) EnumerateAttributes(II->getAttributes()); // Enumerate metadata attached with this instruction. MDs.clear(); I.getAllMetadataOtherThanDebugLoc(MDs); for (unsigned i = 0, e = MDs.size(); i != e; ++i) EnumerateMetadata(MDs[i].second); // Don't enumerate the location directly -- it has a special record // type -- but enumerate its operands. if (DILocation *L = I.getDebugLoc()) EnumerateMDNodeOperands(L); } } // Optimize constant ordering. 
OptimizeConstants(FirstConstant, Values.size()); } unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const { InstructionMapType::const_iterator I = InstructionMap.find(Inst); assert(I != InstructionMap.end() && "Instruction is not mapped!"); return I->second; } unsigned ValueEnumerator::getComdatID(const Comdat *C) const { unsigned ComdatID = Comdats.idFor(C); assert(ComdatID && "Comdat not found!"); return ComdatID; } void ValueEnumerator::setInstructionID(const Instruction *I) { InstructionMap[I] = InstructionCount++; } unsigned ValueEnumerator::getValueID(const Value *V) const { if (auto *MD = dyn_cast<MetadataAsValue>(V)) return getMetadataID(MD->getMetadata()); ValueMapType::const_iterator I = ValueMap.find(V); assert(I != ValueMap.end() && "Value not in slotcalculator!"); return I->second-1; } void ValueEnumerator::dump() const { print(dbgs(), ValueMap, "Default"); dbgs() << '\n'; print(dbgs(), MDValueMap, "MetaData"); dbgs() << '\n'; } void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const { OS << "Map Name: " << Name << "\n"; OS << "Size: " << Map.size() << "\n"; for (ValueMapType::const_iterator I = Map.begin(), E = Map.end(); I != E; ++I) { const Value *V = I->first; if (V->hasName()) OS << "Value: " << V->getName(); else OS << "Value: [null]\n"; V->dump(); OS << " Uses(" << std::distance(V->use_begin(),V->use_end()) << "):"; for (const Use &U : V->uses()) { if (&U != &*V->use_begin()) OS << ","; if(U->hasName()) OS << " " << U->getName(); else OS << " [null]"; } OS << "\n\n"; } } void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map, const char *Name) const { OS << "Map Name: " << Name << "\n"; OS << "Size: " << Map.size() << "\n"; for (auto I = Map.begin(), E = Map.end(); I != E; ++I) { const Metadata *MD = I->first; OS << "Metadata: slot = " << I->second << "\n"; MD->print(OS); } } /// OptimizeConstants - Reorder constant pool for denser encoding. void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) { if (CstStart == CstEnd || CstStart+1 == CstEnd) return; if (ShouldPreserveUseListOrder) // Optimizing constants makes the use-list order difficult to predict. // Disable it for now when trying to preserve the order. return; std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd, [this](const std::pair<const Value *, unsigned> &LHS, const std::pair<const Value *, unsigned> &RHS) { // Sort by plane. if (LHS.first->getType() != RHS.first->getType()) return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType()); // Then by frequency. return LHS.second > RHS.second; }); // Ensure that integer and vector of integer constants are at the start of the // constant pool. This is important so that GEP structure indices come before // gep constant exprs. std::partition(Values.begin()+CstStart, Values.begin()+CstEnd, isIntOrIntVectorValue); // Rebuild the modified portion of ValueMap. for (; CstStart != CstEnd; ++CstStart) ValueMap[Values[CstStart].first] = CstStart+1; } /// EnumerateValueSymbolTable - Insert all of the values in the specified symbol /// table into the values table. void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) { for (ValueSymbolTable::const_iterator VI = VST.begin(), VE = VST.end(); VI != VE; ++VI) EnumerateValue(VI->getValue()); } /// Insert all of the values referenced by named metadata in the specified /// module. 
void ValueEnumerator::EnumerateNamedMetadata(const Module &M) { for (Module::const_named_metadata_iterator I = M.named_metadata_begin(), E = M.named_metadata_end(); I != E; ++I) EnumerateNamedMDNode(I); } void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) { for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) EnumerateMetadata(MD->getOperand(i)); } /// EnumerateMDNodeOperands - Enumerate all non-function-local values /// and types referenced by the given MDNode. void ValueEnumerator::EnumerateMDNodeOperands(const MDNode *N) { for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { Metadata *MD = N->getOperand(i); if (!MD) continue; assert(!isa<LocalAsMetadata>(MD) && "MDNodes cannot be function-local"); EnumerateMetadata(MD); } } void ValueEnumerator::EnumerateMetadata(const Metadata *MD) { assert( (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) && "Invalid metadata kind"); // Insert a dummy ID to block the co-recursive call to // EnumerateMDNodeOperands() from re-visiting MD in a cyclic graph. // // Return early if there's already an ID. if (!MDValueMap.insert(std::make_pair(MD, 0)).second) return; // Visit operands first to minimize RAUW. if (auto *N = dyn_cast<MDNode>(MD)) EnumerateMDNodeOperands(N); else if (auto *C = dyn_cast<ConstantAsMetadata>(MD)) EnumerateValue(C->getValue()); HasMDString |= isa<MDString>(MD); HasDILocation |= isa<DILocation>(MD); HasGenericDINode |= isa<GenericDINode>(MD); // Replace the dummy ID inserted above with the correct one. MDValueMap may // have changed by inserting operands, so we need a fresh lookup here. MDs.push_back(MD); MDValueMap[MD] = MDs.size(); } /// EnumerateFunctionLocalMetadataa - Incorporate function-local metadata /// information reachable from the metadata. void ValueEnumerator::EnumerateFunctionLocalMetadata( const LocalAsMetadata *Local) { // Check to see if it's already in! unsigned &MDValueID = MDValueMap[Local]; if (MDValueID) return; MDs.push_back(Local); MDValueID = MDs.size(); EnumerateValue(Local->getValue()); // Also, collect all function-local metadata for easy access. FunctionLocalMDs.push_back(Local); } void ValueEnumerator::EnumerateValue(const Value *V) { assert(!V->getType()->isVoidTy() && "Can't insert void values!"); assert(!isa<MetadataAsValue>(V) && "EnumerateValue doesn't handle Metadata!"); // Check to see if it's already in! unsigned &ValueID = ValueMap[V]; if (ValueID) { // Increment use count. Values[ValueID-1].second++; return; } if (auto *GO = dyn_cast<GlobalObject>(V)) if (const Comdat *C = GO->getComdat()) Comdats.insert(C); // Enumerate the type of this value. EnumerateType(V->getType()); if (const Constant *C = dyn_cast<Constant>(V)) { if (isa<GlobalValue>(C)) { // Initializers for globals are handled explicitly elsewhere. } else if (C->getNumOperands()) { // If a constant has operands, enumerate them. This makes sure that if a // constant has uses (for example an array of const ints), that they are // inserted also. // We prefer to enumerate them with values before we enumerate the user // itself. This makes it more likely that we can avoid forward references // in the reader. We know that there can be no cycles in the constants // graph that don't go through a global variable. for (User::const_op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I) if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress. EnumerateValue(*I); // Finally, add the value. Doing this could make the ValueID reference be // dangling, don't reuse it. 
Values.push_back(std::make_pair(V, 1U)); ValueMap[V] = Values.size(); return; } } // Add the value. Values.push_back(std::make_pair(V, 1U)); ValueID = Values.size(); } void ValueEnumerator::EnumerateType(Type *Ty) { unsigned *TypeID = &TypeMap[Ty]; // We've already seen this type. if (*TypeID) return; // If it is a non-anonymous struct, mark the type as being visited so that we // don't recursively visit it. This is safe because we allow forward // references of these in the bitcode reader. if (StructType *STy = dyn_cast<StructType>(Ty)) if (!STy->isLiteral()) *TypeID = ~0U; // Enumerate all of the subtypes before we enumerate this type. This ensures // that the type will be enumerated in an order that can be directly built. for (Type *SubTy : Ty->subtypes()) EnumerateType(SubTy); // Refresh the TypeID pointer in case the table rehashed. TypeID = &TypeMap[Ty]; // Check to see if we got the pointer another way. This can happen when // enumerating recursive types that hit the base case deeper than they start. // // If this is actually a struct that we are treating as forward ref'able, // then emit the definition now that all of its contents are available. if (*TypeID && *TypeID != ~0U) return; // Add this type now that its contents are all happily enumerated. Types.push_back(Ty); *TypeID = Types.size(); } // Enumerate the types for the specified value. If the value is a constant, // walk through it, enumerating the types of the constant. void ValueEnumerator::EnumerateOperandType(const Value *V) { EnumerateType(V->getType()); if (auto *MD = dyn_cast<MetadataAsValue>(V)) { assert(!isa<LocalAsMetadata>(MD->getMetadata()) && "Function-local metadata should be left for later"); EnumerateMetadata(MD->getMetadata()); return; } const Constant *C = dyn_cast<Constant>(V); if (!C) return; // If this constant is already enumerated, ignore it, we know its type must // be enumerated. if (ValueMap.count(C)) return; // This constant may have operands, make sure to enumerate the types in // them. for (const Value *Op : C->operands()) { // Don't enumerate basic blocks here, this happens as operands to // blockaddress. if (isa<BasicBlock>(Op)) continue; EnumerateOperandType(Op); } } void ValueEnumerator::EnumerateAttributes(AttributeSet PAL) { if (PAL.isEmpty()) return; // null is always 0. // Do a lookup. unsigned &Entry = AttributeMap[PAL]; if (Entry == 0) { // Never saw this before, add it. Attribute.push_back(PAL); Entry = Attribute.size(); } // Do lookups for all attribute groups. for (unsigned i = 0, e = PAL.getNumSlots(); i != e; ++i) { AttributeSet AS = PAL.getSlotAttributes(i); unsigned &Entry = AttributeGroupMap[AS]; if (Entry == 0) { AttributeGroups.push_back(AS); Entry = AttributeGroups.size(); } } } void ValueEnumerator::incorporateFunction(const Function &F) { InstructionCount = 0; NumModuleValues = Values.size(); NumModuleMDs = MDs.size(); // Adding function arguments to the value table. for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) EnumerateValue(I); FirstFuncConstantID = Values.size(); // Add all function-level constants to the value table. 
for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) for (User::const_op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) { if ((isa<Constant>(*OI) && !isa<GlobalValue>(*OI)) || isa<InlineAsm>(*OI)) EnumerateValue(*OI); } BasicBlocks.push_back(BB); ValueMap[BB] = BasicBlocks.size(); } // Optimize the constant layout. OptimizeConstants(FirstFuncConstantID, Values.size()); // Add the function's parameter attributes so they are available for use in // the function's instruction. EnumerateAttributes(F.getAttributes()); FirstInstID = Values.size(); SmallVector<LocalAsMetadata *, 8> FnLocalMDVector; // Add all of the instructions. for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) { for (User::const_op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) { if (auto *MD = dyn_cast<MetadataAsValue>(&*OI)) if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata())) // Enumerate metadata after the instructions they might refer to. FnLocalMDVector.push_back(Local); } if (!I->getType()->isVoidTy()) EnumerateValue(I); } } // Add all of the function-local metadata. for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i) EnumerateFunctionLocalMetadata(FnLocalMDVector[i]); } void ValueEnumerator::purgeFunction() { /// Remove purged values from the ValueMap. for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i) ValueMap.erase(Values[i].first); for (unsigned i = NumModuleMDs, e = MDs.size(); i != e; ++i) MDValueMap.erase(MDs[i]); for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i) ValueMap.erase(BasicBlocks[i]); Values.resize(NumModuleValues); MDs.resize(NumModuleMDs); BasicBlocks.clear(); FunctionLocalMDs.clear(); } static void IncorporateFunctionInfoGlobalBBIDs(const Function *F, DenseMap<const BasicBlock*, unsigned> &IDMap) { unsigned Counter = 0; for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) IDMap[BB] = ++Counter; } /// getGlobalBasicBlockID - This returns the function-specific ID for the /// specified basic block. This is relatively expensive information, so it /// should only be used by rare constructs such as address-of-label. unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const { unsigned &Idx = GlobalBasicBlockIDs[BB]; if (Idx != 0) return Idx-1; IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs); return getGlobalBasicBlockID(BB); } uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const { return Log2_32_Ceil(getTypes().size() + 1); }
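// A minimal standalone sketch (not from this repository) of the constant-pool
// ordering that ValueEnumerator::OptimizeConstants applies above: group
// entries by type plane, put more frequently used entries first within a
// plane, then move integer-like entries to the front so struct GEP indices
// precede constant expressions. PoolEntry and its fields are hypothetical
// stand-ins for the (Value*, use-count) pairs, getTypeID(), and
// isIntOrIntVectorValue(); a stable partition is used here only so the
// printed order is deterministic.
#include <algorithm>
#include <cstdio>
#include <vector>

struct PoolEntry {
  unsigned TypeId;   // stand-in for getTypeID(V->getType())
  unsigned UseCount; // stand-in for Values[i].second
  bool IsIntLike;    // stand-in for isIntOrIntVectorValue(V)
};

static void optimizeConstantOrder(std::vector<PoolEntry> &Pool) {
  // Sort by type plane, then by descending frequency within a plane.
  std::stable_sort(Pool.begin(), Pool.end(),
                   [](const PoolEntry &L, const PoolEntry &R) {
                     if (L.TypeId != R.TypeId)
                       return L.TypeId < R.TypeId;
                     return L.UseCount > R.UseCount;
                   });
  // Integer and vector-of-integer entries move to the start of the pool.
  std::stable_partition(Pool.begin(), Pool.end(),
                        [](const PoolEntry &E) { return E.IsIntLike; });
}

int main() {
  std::vector<PoolEntry> Pool = {
      {2, 1, false}, {1, 5, true}, {1, 2, true}, {3, 9, false}};
  optimizeConstantOrder(Pool);
  for (const PoolEntry &E : Pool)
    std::printf("type=%u uses=%u int=%d\n", E.TypeId, E.UseCount,
                (int)E.IsIntLike);
  return 0;
}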
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/LLVMBuild.txt
;===- ./lib/Bitcode/Writer/LLVMBuild.txt -----------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = BitWriter parent = Bitcode required_libraries = Core Support
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Writer/BitcodeWriterPass.cpp
//===- BitcodeWriterPass.cpp - Bitcode writing pass -----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // BitcodeWriterPass implementation. // //===----------------------------------------------------------------------===// #include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" using namespace llvm; PreservedAnalyses BitcodeWriterPass::run(Module &M) { WriteBitcodeToFile(&M, OS, ShouldPreserveUseListOrder); return PreservedAnalyses::all(); } namespace { class WriteBitcodePass : public ModulePass { raw_ostream &OS; // raw_ostream to print on bool ShouldPreserveUseListOrder; public: static char ID; // Pass identification, replacement for typeid explicit WriteBitcodePass(raw_ostream &o, bool ShouldPreserveUseListOrder) : ModulePass(ID), OS(o), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {} StringRef getPassName() const override { return "Bitcode Writer"; } bool runOnModule(Module &M) override { WriteBitcodeToFile(&M, OS, ShouldPreserveUseListOrder); return false; } }; } char WriteBitcodePass::ID = 0; ModulePass *llvm::createBitcodeWriterPass(raw_ostream &Str, bool ShouldPreserveUseListOrder) { return new WriteBitcodePass(Str, ShouldPreserveUseListOrder); }
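// A minimal usage sketch for the pass defined above, assuming an existing
// llvm::Module: open an output stream, add the legacy bitcode-writer pass via
// createBitcodeWriterPass(), and run it. The helper name and output path are
// illustrative only.
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

static bool writeModuleBitcode(llvm::Module &M, llvm::StringRef Path) {
  std::error_code EC;
  llvm::raw_fd_ostream OS(Path, EC, llvm::sys::fs::F_None);
  if (EC)
    return false; // could not open the output file
  llvm::legacy::PassManager PM;
  PM.add(llvm::createBitcodeWriterPass(OS, /*ShouldPreserveUseListOrder=*/false));
  PM.run(M);
  return true;
}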
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Reader/BitstreamReader.cpp
//===- BitstreamReader.cpp - BitstreamReader implementation ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Bitcode/BitstreamReader.h" using namespace llvm; //===----------------------------------------------------------------------===// // BitstreamCursor implementation //===----------------------------------------------------------------------===// void BitstreamCursor::freeState() { // Free all the Abbrevs. CurAbbrevs.clear(); // Free all the Abbrevs in the block scope. BlockScope.clear(); } /// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter /// the block, and return true if the block has an error. bool BitstreamCursor::EnterSubBlock(unsigned BlockID, unsigned *NumWordsP) { // Save the current block's state on BlockScope. BlockScope.push_back(Block(CurCodeSize)); BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); // Add the abbrevs specific to this block to the CurAbbrevs list. if (const BitstreamReader::BlockInfo *Info = BitStream->getBlockInfo(BlockID)) { CurAbbrevs.insert(CurAbbrevs.end(), Info->Abbrevs.begin(), Info->Abbrevs.end()); } // Get the codesize of this block. CurCodeSize = ReadVBR(bitc::CodeLenWidth); // We can't read more than MaxChunkSize at a time if (CurCodeSize > MaxChunkSize) return true; SkipToFourByteBoundary(); unsigned NumWords = Read(bitc::BlockSizeWidth); if (NumWordsP) *NumWordsP = NumWords; // Validate that this block is sane. return CurCodeSize == 0 || AtEndOfStream(); } static uint64_t readAbbreviatedField(BitstreamCursor &Cursor, const BitCodeAbbrevOp &Op) { assert(!Op.isLiteral() && "Not to be used with literals!"); // Decode the value as we are commanded. switch (Op.getEncoding()) { case BitCodeAbbrevOp::Array: case BitCodeAbbrevOp::Blob: llvm_unreachable("Should not reach here"); case BitCodeAbbrevOp::Fixed: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); return Cursor.Read((unsigned)Op.getEncodingData()); case BitCodeAbbrevOp::VBR: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); return Cursor.ReadVBR64((unsigned)Op.getEncodingData()); case BitCodeAbbrevOp::Char6: return BitCodeAbbrevOp::DecodeChar6(Cursor.Read(6)); } llvm_unreachable("invalid abbreviation encoding"); } static void skipAbbreviatedField(BitstreamCursor &Cursor, const BitCodeAbbrevOp &Op) { assert(!Op.isLiteral() && "Not to be used with literals!"); // Decode the value as we are commanded. switch (Op.getEncoding()) { case BitCodeAbbrevOp::Array: case BitCodeAbbrevOp::Blob: llvm_unreachable("Should not reach here"); case BitCodeAbbrevOp::Fixed: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); Cursor.Read((unsigned)Op.getEncodingData()); break; case BitCodeAbbrevOp::VBR: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); Cursor.ReadVBR64((unsigned)Op.getEncodingData()); break; case BitCodeAbbrevOp::Char6: Cursor.Read(6); break; } } /// skipRecord - Read the current record and discard it. void BitstreamCursor::skipRecord(unsigned AbbrevID) { // Skip unabbreviated records by reading past their entries. 
if (AbbrevID == bitc::UNABBREV_RECORD) { unsigned Code = ReadVBR(6); (void)Code; unsigned NumElts = ReadVBR(6); for (unsigned i = 0; i != NumElts; ++i) (void)ReadVBR64(6); return; } const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); for (unsigned i = 0, e = Abbv->getNumOperandInfos(); i != e; ++i) { const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); if (Op.isLiteral()) continue; if (Op.getEncoding() != BitCodeAbbrevOp::Array && Op.getEncoding() != BitCodeAbbrevOp::Blob) { skipAbbreviatedField(*this, Op); continue; } if (Op.getEncoding() == BitCodeAbbrevOp::Array) { // Array case. Read the number of elements as a vbr6. unsigned NumElts = ReadVBR(6); // Get the element encoding. assert(i+2 == e && "array op not second to last?"); const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); #if 1 // HLSL Change - Make skipping go brrrrrrrrrrr { const auto &Op = EltEnc; auto &Cursor = *this; auto CurBit = Cursor.GetCurrentBitNo(); // Decode the value as we are commanded. switch (EltEnc.getEncoding()) { case BitCodeAbbrevOp::Array: case BitCodeAbbrevOp::Blob: llvm_unreachable("Should not reach here"); case BitCodeAbbrevOp::Fixed: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); Cursor.JumpToBit(CurBit + NumElts * Op.getEncodingData()); break; case BitCodeAbbrevOp::VBR: assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); for (; NumElts; --NumElts) Cursor.ReadVBR64((unsigned)Op.getEncodingData()); break; case BitCodeAbbrevOp::Char6: Cursor.JumpToBit(CurBit + NumElts * 6); break; } } #else // Read all the elements. for (; NumElts; --NumElts) skipAbbreviatedField(*this, EltEnc); #endif continue; } assert(Op.getEncoding() == BitCodeAbbrevOp::Blob); // Blob case. Read the number of bytes as a vbr6. unsigned NumElts = ReadVBR(6); SkipToFourByteBoundary(); // 32-bit alignment // Figure out where the end of this blob will be including tail padding. size_t NewEnd = GetCurrentBitNo()+((NumElts+3)&~3)*8; // If this would read off the end of the bitcode file, just set the // record to empty and return. if (!canSkipToPos(NewEnd/8)) { NextChar = BitStream->getBitcodeBytes().getExtent(); break; } // Skip over the blob. JumpToBit(NewEnd); } } // HLSL Change - Begin unsigned BitstreamCursor::peekRecord(unsigned AbbrevID) { auto last_bit_pos = GetCurrentBitNo(); if (AbbrevID == bitc::UNABBREV_RECORD) { unsigned Code = ReadVBR(6); this->JumpToBit(last_bit_pos); return Code; } const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); // Read the record code first. 
assert(Abbv->getNumOperandInfos() != 0 && "no record code in abbreviation?"); const BitCodeAbbrevOp &CodeOp = Abbv->getOperandInfo(0); unsigned Code; if (CodeOp.isLiteral()) Code = CodeOp.getLiteralValue(); else { if (CodeOp.getEncoding() == BitCodeAbbrevOp::Array || CodeOp.getEncoding() == BitCodeAbbrevOp::Blob) report_fatal_error("Abbreviation starts with an Array or a Blob"); Code = readAbbreviatedField(*this, CodeOp); } this->JumpToBit(last_bit_pos); return Code; } template<typename T> void BitstreamCursor::AddRecordElements(BitCodeAbbrevOp::Encoding enc, uint64_t encData, unsigned NumElts, SmallVectorImpl<T> &Vals) { const unsigned size = (unsigned)encData; if (enc == BitCodeAbbrevOp::VBR) { assert((unsigned)encData <= MaxChunkSize); for (; NumElts; --NumElts) { Vals.push_back((T)ReadVBR64(size)); } } else if (enc == BitCodeAbbrevOp::Char6) { assert((unsigned)encData <= MaxChunkSize); for (; NumElts; --NumElts) { Vals.push_back(BitCodeAbbrevOp::DecodeChar6(Read(6))); } } else { llvm_unreachable("Unknown kind of thing"); } } // HLSL Change - End unsigned BitstreamCursor::readRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals, StringRef *Blob, SmallVectorImpl<uint8_t> *Uint8Vals // HLSL Change ) { if (AbbrevID == bitc::UNABBREV_RECORD) { unsigned Code = ReadVBR(6); unsigned NumElts = ReadVBR(6); if (Uint8Vals) { for (unsigned i = 0; i != NumElts; ++i) Uint8Vals->push_back((uint8_t)ReadVBR64(6)); } else { for (unsigned i = 0; i != NumElts; ++i) Vals.push_back(ReadVBR64(6)); } return Code; } const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); // Read the record code first. assert(Abbv->getNumOperandInfos() != 0 && "no record code in abbreviation?"); const BitCodeAbbrevOp &CodeOp = Abbv->getOperandInfo(0); unsigned Code; if (CodeOp.isLiteral()) Code = CodeOp.getLiteralValue(); else { if (CodeOp.getEncoding() == BitCodeAbbrevOp::Array || CodeOp.getEncoding() == BitCodeAbbrevOp::Blob) report_fatal_error("Abbreviation starts with an Array or a Blob"); Code = readAbbreviatedField(*this, CodeOp); } for (unsigned i = 1, e = Abbv->getNumOperandInfos(); i != e; ++i) { const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); if (Op.isLiteral()) { Vals.push_back(Op.getLiteralValue()); continue; } if (Op.getEncoding() != BitCodeAbbrevOp::Array && Op.getEncoding() != BitCodeAbbrevOp::Blob) { Vals.push_back(readAbbreviatedField(*this, Op)); continue; } if (Op.getEncoding() == BitCodeAbbrevOp::Array) { // Array case. Read the number of elements as a vbr6. unsigned NumElts = ReadVBR(6); // Get the element encoding. if (i + 2 != e) report_fatal_error("Array op not second to last"); const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); if (!EltEnc.isEncoding()) report_fatal_error( "Array element type has to be an encoding of a type"); if (EltEnc.getEncoding() == BitCodeAbbrevOp::Array || EltEnc.getEncoding() == BitCodeAbbrevOp::Blob) report_fatal_error("Array element type can't be an Array or a Blob"); #if 1 // HLSL Change // Read all the elements a little faster. 
{ BitCodeAbbrevOp::Encoding enc = EltEnc.getEncoding(); uint64_t encData = 0; if (EltEnc.hasEncodingData()) encData = EltEnc.getEncodingData(); unsigned size = (unsigned)encData; if (Uint8Vals) { if (enc == BitCodeAbbrevOp::Fixed) { assert((unsigned)encData <= MaxChunkSize); assert((unsigned)encData == 8); // Special optimization for fixed elements that are 8 bits Uint8Vals->resize(NumElts); uint8_t *ptr = Uint8Vals->data(); unsigned i = 0; constexpr unsigned BytesInWord = sizeof(size_t); // First, read word by word instead of byte by byte for (; NumElts >= BytesInWord; NumElts -= BytesInWord) { const size_t e = Read(BytesInWord * 8); memcpy(ptr + i, &e, sizeof(e)); i += BytesInWord; } for (; NumElts; --NumElts) Uint8Vals->operator[](i++) = (uint8_t)Read(8); } else { AddRecordElements(enc, encData, NumElts, *Uint8Vals); } } else { if (enc == BitCodeAbbrevOp::Fixed) { assert((unsigned)encData <= MaxChunkSize); Vals.reserve(Vals.size() + NumElts); for (; NumElts; --NumElts) Vals.push_back(Read(size)); } else { AddRecordElements(enc, encData, NumElts, Vals); } } } #else // HLSL Change // Read all the elements. for (; NumElts; --NumElts) Vals.push_back(readAbbreviatedField(*this, EltEnc)); #endif // HLSL Change continue; } assert(Op.getEncoding() == BitCodeAbbrevOp::Blob); // Blob case. Read the number of bytes as a vbr6. unsigned NumElts = ReadVBR(6); SkipToFourByteBoundary(); // 32-bit alignment // Figure out where the end of this blob will be including tail padding. size_t CurBitPos = GetCurrentBitNo(); size_t NewEnd = CurBitPos+((NumElts+3)&~3)*8; // If this would read off the end of the bitcode file, just set the // record to empty and return. if (!canSkipToPos(NewEnd/8)) { Vals.append(NumElts, 0); NextChar = BitStream->getBitcodeBytes().getExtent(); break; } // Otherwise, inform the streamer that we need these bytes in memory. const char *Ptr = (const char*) BitStream->getBitcodeBytes().getPointer(CurBitPos/8, NumElts); // If we can return a reference to the data, do so to avoid copying it. if (Blob) { *Blob = StringRef(Ptr, NumElts); } else { // Otherwise, unpack into Vals with zero extension. for (; NumElts; --NumElts) Vals.push_back((unsigned char)*Ptr++); } // Skip over tail padding. JumpToBit(NewEnd); } return Code; } void BitstreamCursor::ReadAbbrevRecord() { BitCodeAbbrev *Abbv = new BitCodeAbbrev(); unsigned NumOpInfo = ReadVBR(5); for (unsigned i = 0; i != NumOpInfo; ++i) { bool IsLiteral = Read(1); if (IsLiteral) { Abbv->Add(BitCodeAbbrevOp(ReadVBR64(8))); continue; } BitCodeAbbrevOp::Encoding E = (BitCodeAbbrevOp::Encoding)Read(3); if (BitCodeAbbrevOp::hasEncodingData(E)) { uint64_t Data = ReadVBR64(5); // As a special case, handle fixed(0) (i.e., a fixed field with zero bits) // and vbr(0) as a literal zero. This is decoded the same way, and avoids // a slow path in Read() to have to handle reading zero bits. if ((E == BitCodeAbbrevOp::Fixed || E == BitCodeAbbrevOp::VBR) && Data == 0) { Abbv->Add(BitCodeAbbrevOp(0)); continue; } if ((E == BitCodeAbbrevOp::Fixed || E == BitCodeAbbrevOp::VBR) && Data > MaxChunkSize) report_fatal_error( "Fixed or VBR abbrev record with size > MaxChunkData"); Abbv->Add(BitCodeAbbrevOp(E, Data)); } else Abbv->Add(BitCodeAbbrevOp(E)); } if (Abbv->getNumOperandInfos() == 0) report_fatal_error("Abbrev record with no operands"); CurAbbrevs.push_back(Abbv); } bool BitstreamCursor::ReadBlockInfoBlock(unsigned *pCount) { // If this is the second stream to get to the block info block, skip it. 
if (BitStream->hasBlockInfoRecords()) return SkipBlock(); if (EnterSubBlock(bitc::BLOCKINFO_BLOCK_ID)) return true; SmallVector<uint64_t, 64> Record; BitstreamReader::BlockInfo *CurBlockInfo = nullptr; // Read all the records for this module. while (1) { BitstreamEntry Entry = advanceSkippingSubblocks(AF_DontAutoprocessAbbrevs, pCount); switch (Entry.Kind) { case llvm::BitstreamEntry::SubBlock: // Handled for us already. case llvm::BitstreamEntry::Error: return true; case llvm::BitstreamEntry::EndBlock: return false; case llvm::BitstreamEntry::Record: // The interesting case. break; } // Read abbrev records, associate them with CurBID. if (Entry.ID == bitc::DEFINE_ABBREV) { if (!CurBlockInfo) return true; ReadAbbrevRecord(); // ReadAbbrevRecord installs the abbrev in CurAbbrevs. Move it to the // appropriate BlockInfo. CurBlockInfo->Abbrevs.push_back(std::move(CurAbbrevs.back())); CurAbbrevs.pop_back(); continue; } // Read a record. Record.clear(); switch (readRecord(Entry.ID, Record)) { default: break; // Default behavior, ignore unknown content. case bitc::BLOCKINFO_CODE_SETBID: if (Record.size() < 1) return true; CurBlockInfo = &BitStream->getOrCreateBlockInfo((unsigned)Record[0]); break; case bitc::BLOCKINFO_CODE_BLOCKNAME: { if (!CurBlockInfo) return true; if (BitStream->isIgnoringBlockInfoNames()) break; // Ignore name. std::string Name; for (unsigned i = 0, e = Record.size(); i != e; ++i) Name += (char)Record[i]; CurBlockInfo->Name = Name; break; } case bitc::BLOCKINFO_CODE_SETRECORDNAME: { if (!CurBlockInfo) return true; if (BitStream->isIgnoringBlockInfoNames()) break; // Ignore name. std::string Name; for (unsigned i = 1, e = Record.size(); i != e; ++i) Name += (char)Record[i]; CurBlockInfo->RecordNames.push_back(std::make_pair((unsigned)Record[0], Name)); break; } } } } // HLSL Change Starts void BitstreamUseTracker::track(BitstreamUseTracker *BT, uint64_t begin, uint64_t end) { if (BT) BT->insert(begin, end); } BitstreamUseTracker::ExtendResult BitstreamUseTracker::extendRange(UseRange &Curr, UseRange &NewRange) { // Most likely case first. if (Curr.first <= NewRange.first && Curr.second < NewRange.second) { Curr.second = NewRange.second; return ExtendedEnd; } if (Curr.first <= NewRange.first && NewRange.second <= Curr.second) { return Included; // already included. } if (NewRange.first < Curr.first && NewRange.second <= Curr.second) { return ExtendedBegin; } if (NewRange.first < Curr.first && Curr.second < NewRange.second) { return ExtendedBoth; } return Exclusive; } bool BitstreamUseTracker::isDense(uint64_t endBitoffset) const { return Ranges.size() == 1 && Ranges[0].first == 0 && Ranges[0].second == endBitoffset; } bool BitstreamUseTracker::considerMergeRight(size_t idx) { bool changed = false; while (idx < Ranges.size() - 1) { if (Ranges[idx].second >= Ranges[idx + 1].first) { Ranges[idx].second = Ranges[idx + 1].second; Ranges.erase(&Ranges[idx + 1]); changed = true; } } return changed; } void BitstreamUseTracker::insert(uint64_t begin, uint64_t end) { UseRange IR(begin, end); for (size_t i = 0, E = Ranges.size(); i < E; ++i) { ExtendResult ER = extendRange(Ranges[i], IR); switch (ER) { case Included: return; case ExtendedEnd: considerMergeRight(i); return; case ExtendedBegin: if (i > 0) considerMergeRight(i - 1); return; case ExtendedBoth: if (i > 0) { if (!considerMergeRight(i - 1)) considerMergeRight(i); } else considerMergeRight(i); return; case Exclusive: // If completely to the left, then insert there; otherwise, // keep traversing in order. 
if (end <= Ranges[i].first) { Ranges.insert(&Ranges[i], IR); return; } } } // This range goes at the end. Ranges.push_back(IR); } BitstreamUseTracker::ScopeTrack BitstreamUseTracker::scope_track(BitstreamCursor *BC) { ScopeTrack Result; Result.BC = BC; Result.begin = BC->GetCurrentBitNo(); return Result; } // HLSL Change Ends
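// A standalone sketch (not from this file) of the VBR decoding that the
// ReadVBR/ReadVBR64 calls above rely on: each N-bit chunk carries N-1 data
// bits in its low bits, and its high bit says whether another chunk follows.
// BitSource is a hypothetical stand-in for BitstreamCursor::Read().
#include <cstdint>
#include <cstdio>
#include <functional>

using BitSource = std::function<uint64_t(unsigned NumBits)>;

static uint64_t readVBR64(const BitSource &ReadBits, unsigned N) {
  uint64_t Piece = ReadBits(N);
  const uint64_t HiMask = 1ULL << (N - 1);
  if ((Piece & HiMask) == 0)
    return Piece; // single chunk, continuation bit clear
  uint64_t Result = 0;
  unsigned NextShift = 0;
  while (true) {
    Result |= (Piece & (HiMask - 1)) << NextShift; // keep the low N-1 bits
    if ((Piece & HiMask) == 0)
      return Result; // continuation bit clear: value complete
    NextShift += N - 1;
    Piece = ReadBits(N);
  }
}

int main() {
  // 300 encoded as VBR6: {0b101100 (low 5 bits = 12, continue), 0b001001}.
  const uint64_t Chunks[] = {44, 9};
  unsigned Idx = 0;
  BitSource Src = [&](unsigned) { return Chunks[Idx++]; };
  std::printf("decoded %llu\n", (unsigned long long)readVBR64(Src, 6));
  return 0;
}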
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Reader/CMakeLists.txt
set(HLSL_IGNORE_SOURCES BitReader.cpp) add_llvm_library(LLVMBitReader # BitReader.cpp # HLSL Change - this is just the C wrapper BitcodeReader.cpp BitstreamReader.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/Bitcode ) add_dependencies(LLVMBitReader intrinsics_gen)
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Reader/LLVMBuild.txt
;===- ./lib/Bitcode/Reader/LLVMBuild.txt -----------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = BitReader parent = Bitcode required_libraries = Core Support
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Reader/BitReader.cpp
//===-- BitReader.cpp -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm-c/BitReader.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" #include <cstring> #include <string> using namespace llvm; /* Builds a module from the bitcode in the specified memory buffer, returning a reference to the module via the OutModule parameter. Returns 0 on success. Optionally returns a human-readable error message via OutMessage. */ LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule, char **OutMessage) { return LLVMParseBitcodeInContext(wrap(&getGlobalContext()), MemBuf, OutModule, OutMessage); } LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef, LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule, char **OutMessage) { MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef(); LLVMContext &Ctx = *unwrap(ContextRef); std::string Message; raw_string_ostream Stream(Message); DiagnosticPrinterRawOStream DP(Stream); ErrorOr<std::unique_ptr<Module>> ModuleOrErr = parseBitcodeFile( Buf, Ctx, [&](const DiagnosticInfo &DI) { DI.print(DP); }); if (ModuleOrErr.getError()) { if (OutMessage) { Stream.flush(); *OutMessage = strdup(Message.c_str()); } *OutModule = wrap((Module*)nullptr); return 1; } *OutModule = wrap(ModuleOrErr.get().release()); return 0; } /* Reads a module from the specified path, returning via the OutModule parameter a module provider which performs lazy deserialization. Returns 0 on success. Optionally returns a human-readable error message via OutMessage. */ LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef, LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM, char **OutMessage) { std::string Message; std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf)); ErrorOr<std::unique_ptr<Module>> ModuleOrErr = getLazyBitcodeModule(std::move(Owner), *unwrap(ContextRef)); Owner.release(); if (std::error_code EC = ModuleOrErr.getError()) { *OutM = wrap((Module *)nullptr); if (OutMessage) *OutMessage = strdup(EC.message().c_str()); return 1; } *OutM = wrap(ModuleOrErr.get().release()); return 0; } LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM, char **OutMessage) { return LLVMGetBitcodeModuleInContext(LLVMGetGlobalContext(), MemBuf, OutM, OutMessage); } /* Deprecated: Use LLVMGetBitcodeModuleInContext instead. */ LLVMBool LLVMGetBitcodeModuleProviderInContext(LLVMContextRef ContextRef, LLVMMemoryBufferRef MemBuf, LLVMModuleProviderRef *OutMP, char **OutMessage) { return LLVMGetBitcodeModuleInContext(ContextRef, MemBuf, reinterpret_cast<LLVMModuleRef*>(OutMP), OutMessage); } /* Deprecated: Use LLVMGetBitcodeModule instead. */ LLVMBool LLVMGetBitcodeModuleProvider(LLVMMemoryBufferRef MemBuf, LLVMModuleProviderRef *OutMP, char **OutMessage) { return LLVMGetBitcodeModuleProviderInContext(LLVMGetGlobalContext(), MemBuf, OutMP, OutMessage); }
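// A minimal usage sketch for the C API wrappers above (which the HLSL build
// excludes, per the reader CMakeLists.txt earlier), assuming the upstream
// llvm-c headers: read a bitcode file into a memory buffer, parse it into a
// module, and report any error message. The input path is illustrative only.
#include "llvm-c/BitReader.h"
#include "llvm-c/Core.h"
#include <cstdio>

int main() {
  LLVMMemoryBufferRef Buf = nullptr;
  char *Err = nullptr;
  if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Err)) {
    std::fprintf(stderr, "cannot read file: %s\n", Err ? Err : "unknown");
    LLVMDisposeMessage(Err);
    return 1;
  }
  LLVMContextRef Ctx = LLVMContextCreate();
  LLVMModuleRef M = nullptr;
  if (LLVMParseBitcodeInContext(Ctx, Buf, &M, &Err)) {
    std::fprintf(stderr, "cannot parse bitcode: %s\n", Err ? Err : "unknown");
    LLVMDisposeMessage(Err);
    return 1;
  }
  LLVMDisposeMemoryBuffer(Buf); // the parser reads from, but does not own, Buf
  LLVMDumpModule(M);            // print the parsed module to stderr
  LLVMDisposeModule(M);
  LLVMContextDispose(Ctx);
  return 0;
}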
0
repos/DirectXShaderCompiler/lib/Bitcode
repos/DirectXShaderCompiler/lib/Bitcode/Reader/BitcodeReader.cpp
//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Triple.h" #include "llvm/Bitcode/BitstreamReader.h" #include "llvm/Bitcode/LLVMBitCodes.h" #include "llvm/IR/AutoUpgrade.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/GVMaterializer.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/OperandTraits.h" #include "llvm/IR/Operator.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/DataStream.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" #include <deque> #include <unordered_set> // HLSL Change #include "dxc/DXIL/DxilOperations.h" // HLSL Change using namespace llvm; namespace { enum { SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 => Hex }; class BitcodeReaderValueList { std::vector<WeakTrackingVH> ValuePtrs; /// As we resolve forward-referenced constants, we add information about them /// to this vector. This allows us to resolve them in bulk instead of /// resolving each reference at a time. See the code in /// ResolveConstantForwardRefs for more information about this. /// /// The key of this vector is the placeholder constant, the value is the slot /// number that holds the resolved value. typedef std::vector<std::pair<Constant*, unsigned> > ResolveConstantsTy; ResolveConstantsTy ResolveConstants; LLVMContext &Context; public: BitcodeReaderValueList(LLVMContext &C) : Context(C) {} ~BitcodeReaderValueList() { assert(ResolveConstants.empty() && "Constants not resolved?"); } // vector compatibility methods unsigned size() const { return ValuePtrs.size(); } void resize(unsigned N) { ValuePtrs.resize(N); } void push_back(Value *V) { ValuePtrs.emplace_back(V); } void clear() { assert(ResolveConstants.empty() && "Constants not resolved?"); ValuePtrs.clear(); } Value *operator[](unsigned i) const { assert(i < ValuePtrs.size()); return ValuePtrs[i]; } Value *back() const { return ValuePtrs.back(); } void pop_back() { ValuePtrs.pop_back(); } bool empty() const { return ValuePtrs.empty(); } void shrinkTo(unsigned N) { assert(N <= size() && "Invalid shrinkTo request!"); ValuePtrs.resize(N); } Constant *getConstantFwdRef(unsigned Idx, Type *Ty); Value *getValueFwdRef(unsigned Idx, Type *Ty); void assignValue(Value *V, unsigned Idx); /// Once all constants are read, this method bulk resolves any forward /// references. 
void resolveConstantForwardRefs(); }; class BitcodeReaderMDValueList { unsigned NumFwdRefs; bool AnyFwdRefs; unsigned MinFwdRef; unsigned MaxFwdRef; std::vector<TrackingMDRef> MDValuePtrs; LLVMContext &Context; public: BitcodeReaderMDValueList(LLVMContext &C) : NumFwdRefs(0), AnyFwdRefs(false), Context(C) {} // vector compatibility methods unsigned size() const { return MDValuePtrs.size(); } void resize(unsigned N) { MDValuePtrs.resize(N); } void push_back(Metadata *MD) { MDValuePtrs.emplace_back(MD); } void clear() { MDValuePtrs.clear(); } Metadata *back() const { return MDValuePtrs.back(); } void pop_back() { MDValuePtrs.pop_back(); } bool empty() const { return MDValuePtrs.empty(); } Metadata *operator[](unsigned i) const { assert(i < MDValuePtrs.size()); return MDValuePtrs[i]; } void shrinkTo(unsigned N) { assert(N <= size() && "Invalid shrinkTo request!"); MDValuePtrs.resize(N); } Metadata *getValueFwdRef(unsigned Idx); void assignValue(Metadata *MD, unsigned Idx); void tryToResolveCycles(); }; class BitcodeReader : public GVMaterializer { LLVMContext &Context; DiagnosticHandlerFunction DiagnosticHandler; Module *TheModule = nullptr; std::unique_ptr<MemoryBuffer> Buffer; std::unique_ptr<BitstreamReader> StreamFile; BitstreamCursor Stream; uint64_t NextUnreadBit = 0; bool SeenValueSymbolTable = false; std::vector<Type*> TypeList; BitcodeReaderValueList ValueList; BitcodeReaderMDValueList MDValueList; std::vector<Comdat *> ComdatList; SmallVector<Instruction *, 64> InstructionList; std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInits; std::vector<std::pair<GlobalAlias*, unsigned> > AliasInits; std::vector<std::pair<Function*, unsigned> > FunctionPrefixes; std::vector<std::pair<Function*, unsigned> > FunctionPrologues; std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFns; SmallVector<Instruction*, 64> InstsWithTBAATag; /// The set of attributes by index. Index zero in the file is for null, and /// is thus not represented here. As such all indices are off by one. std::vector<AttributeSet> MAttributes; /// \brief The set of attribute groups. std::map<unsigned, AttributeSet> MAttributeGroups; /// While parsing a function body, this is a list of the basic blocks for the /// function. std::vector<BasicBlock*> FunctionBBs; // When reading the module header, this list is populated with functions that // have bodies later in the file. std::vector<Function*> FunctionsWithBodies; // When intrinsic functions are encountered which require upgrading they are // stored here with their replacement function. typedef DenseMap<Function*, Function*> UpgradedIntrinsicMap; UpgradedIntrinsicMap UpgradedIntrinsics; // Map the bitcode's custom MDKind ID to the Module's MDKind ID. DenseMap<unsigned, unsigned> MDKindMap; // Several operations happen after the module header has been read, but // before function bodies are processed. This keeps track of whether // we've done this yet. bool SeenFirstFunctionBody = false; /// When function bodies are initially scanned, this map contains info about /// where to find deferred function body in the stream. DenseMap<Function*, uint64_t> DeferredFunctionInfo; /// When Metadata block is initially scanned when parsing the module, we may /// choose to defer parsing of the metadata. This vector contains info about /// which Metadata blocks are deferred. std::vector<uint64_t> DeferredMetadataInfo; /// These are basic blocks forward-referenced by block addresses. They are /// inserted lazily into functions when they're loaded. 
The basic block ID is /// its index into the vector. DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs; std::deque<Function *> BasicBlockFwdRefQueue; /// Indicates that we are using a new encoding for instruction operands where /// most operands in the current FUNCTION_BLOCK are encoded relative to the /// instruction number, for a more compact encoding. Some instruction /// operands are not relative to the instruction ID: basic block numbers, and /// types. Once the old style function blocks have been phased out, we would /// not need this flag. bool UseRelativeIDs = false; /// True if all functions will be materialized, negating the need to process /// (e.g.) blockaddress forward references. bool WillMaterializeAllForwardRefs = false; /// Functions that have block addresses taken. This is usually empty. SmallPtrSet<const Function *, 4> BlockAddressesTaken; /// True if any Metadata block has been materialized. bool IsMetadataMaterialized = false; bool StripDebugInfo = false; public: std::error_code error(BitcodeError E, const Twine &Message); // HLSL Change: Remove unused function declaration // std::error_code error(BitcodeError E); std::error_code error(const Twine &Message); BitcodeReader(std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, // HLSL Change: unique_ptr DiagnosticHandlerFunction DiagnosticHandler); BitcodeReader(LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler); ~BitcodeReader() override { freeState(); } std::error_code materializeForwardReferencedFunctions(); void freeState(); // HLSL Change: remove unused function declaration // void releaseBuffer(); bool ShouldTrackBitstreamUsage = false; // HLSL Change BitstreamUseTracker Tracker; // HLSL Change bool isDematerializable(const GlobalValue *GV) const override; std::error_code materialize(GlobalValue *GV) override; std::error_code materializeModule(Module *M) override; std::vector<StructType *> getIdentifiedStructTypes() const override; void dematerialize(GlobalValue *GV) override; /// \brief Main interface to parsing a bitcode buffer. /// \returns true if an error occurred. std::error_code parseBitcodeInto(std::unique_ptr<DataStreamer> Streamer, Module *M, bool ShouldLazyLoadMetadata = false); /// \brief Cheap mechanism to just extract module triple /// \returns true if an error occurred. ErrorOr<std::string> parseTriple(); static uint64_t decodeSignRotatedValue(uint64_t V); /// Materialize any deferred Metadata block. std::error_code materializeMetadata() override; void setStripDebugInfo() override; private: std::vector<StructType *> IdentifiedStructTypes; StructType *createIdentifiedStructType(LLVMContext &Context, StringRef Name); StructType *createIdentifiedStructType(LLVMContext &Context); Type *getTypeByID(unsigned ID); Value *getFnValueByID(unsigned ID, Type *Ty) { if (Ty && Ty->isMetadataTy()) return MetadataAsValue::get(Ty->getContext(), getFnMetadataByID(ID)); return ValueList.getValueFwdRef(ID, Ty); } Metadata *getFnMetadataByID(unsigned ID) { return MDValueList.getValueFwdRef(ID); } BasicBlock *getBasicBlock(unsigned ID) const { if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID return FunctionBBs[ID]; } AttributeSet getAttributes(unsigned i) const { if (i-1 < MAttributes.size()) return MAttributes[i-1]; return AttributeSet(); } /// Read a value/type pair out of the specified record from slot 'Slot'. /// Increment Slot past the number of slots used in the record. Return true on /// failure. 
bool getValueTypePair(SmallVectorImpl<uint64_t> &Record, unsigned &Slot, unsigned InstNum, Value *&ResVal) { if (Slot == Record.size()) return true; unsigned ValNo = (unsigned)Record[Slot++]; // Adjust the ValNo, if it was encoded relative to the InstNum. if (UseRelativeIDs) ValNo = InstNum - ValNo; if (ValNo < InstNum) { // If this is not a forward reference, just return the value we already // have. ResVal = getFnValueByID(ValNo, nullptr); return ResVal == nullptr; } if (Slot == Record.size()) return true; unsigned TypeNo = (unsigned)Record[Slot++]; ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo)); return ResVal == nullptr; } /// Read a value out of the specified record from slot 'Slot'. Increment Slot /// past the number of slots used by the value in the record. Return true if /// there is an error. bool popValue(SmallVectorImpl<uint64_t> &Record, unsigned &Slot, unsigned InstNum, Type *Ty, Value *&ResVal) { if (getValue(Record, Slot, InstNum, Ty, ResVal)) return true; // All values currently take a single record slot. ++Slot; return false; } /// Like popValue, but does not increment the Slot number. bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot, unsigned InstNum, Type *Ty, Value *&ResVal) { ResVal = getValue(Record, Slot, InstNum, Ty); return ResVal == nullptr; } /// Version of getValue that returns ResVal directly, or 0 if there is an /// error. Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot, unsigned InstNum, Type *Ty) { if (Slot == Record.size()) return nullptr; unsigned ValNo = (unsigned)Record[Slot]; // Adjust the ValNo, if it was encoded relative to the InstNum. if (UseRelativeIDs) ValNo = InstNum - ValNo; return getFnValueByID(ValNo, Ty); } /// Like getValue, but decodes signed VBRs. Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot, unsigned InstNum, Type *Ty) { if (Slot == Record.size()) return nullptr; unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]); // Adjust the ValNo, if it was encoded relative to the InstNum. if (UseRelativeIDs) ValNo = InstNum - ValNo; return getFnValueByID(ValNo, Ty); } /// Converts alignment exponent (i.e. power of two (or zero)) to the /// corresponding alignment to use. If alignment is too large, returns /// a corresponding error code. std::error_code parseAlignmentValue(uint64_t Exponent, unsigned &Alignment); std::error_code parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind); std::error_code parseModule(bool Resume, bool ShouldLazyLoadMetadata = false); std::error_code parseAttributeBlock(); std::error_code parseAttributeGroupBlock(); std::error_code parseTypeTable(); std::error_code parseTypeTableBody(); std::error_code parseValueSymbolTable(); std::error_code parseConstants(); std::error_code rememberAndSkipFunctionBody(); /// Save the positions of the Metadata blocks and skip parsing the blocks. 
std::error_code rememberAndSkipMetadata(); std::error_code parseFunctionBody(Function *F); std::error_code globalCleanup(); std::error_code resolveGlobalAndAliasInits(); std::error_code parseMetadata(); std::error_code parseSelectNamedMetadata(ArrayRef<StringRef> NamedMetadata); // HLSL Change std::error_code materializeSelectNamedMetadata(ArrayRef<StringRef> NamedMetadata) override; // HLSL Change std::error_code parseMetadataAttachment(Function &F); ErrorOr<std::string> parseModuleTriple(); std::error_code parseUseLists(); std::error_code initStream(std::unique_ptr<DataStreamer> Streamer); std::error_code initStreamFromBuffer(); std::error_code initLazyStream(std::unique_ptr<DataStreamer> Streamer); std::error_code findFunctionInStream( Function *F, DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator); }; } // namespace BitcodeDiagnosticInfo::BitcodeDiagnosticInfo(std::error_code EC, DiagnosticSeverity Severity, const Twine &Msg) : DiagnosticInfo(DK_Bitcode, Severity), Msg(Msg), EC(EC) {} void BitcodeDiagnosticInfo::print(DiagnosticPrinter &DP) const { DP << Msg; } static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler, std::error_code EC, const Twine &Message) { BitcodeDiagnosticInfo DI(EC, DS_Error, Message); DiagnosticHandler(DI); return EC; } // HLSL Change: remove unused function #if 0 static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler, std::error_code EC) { return error(DiagnosticHandler, EC, EC.message()); } #endif static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler, const Twine &Message) { return error(DiagnosticHandler, make_error_code(BitcodeError::CorruptedBitcode), Message); } std::error_code BitcodeReader::error(BitcodeError E, const Twine &Message) { return ::error(DiagnosticHandler, make_error_code(E), Message); } std::error_code BitcodeReader::error(const Twine &Message) { return ::error(DiagnosticHandler, make_error_code(BitcodeError::CorruptedBitcode), Message); } // HLSL Change: remove unused function #if 0 std::error_code BitcodeReader::error(BitcodeError E) { return ::error(DiagnosticHandler, make_error_code(E)); } #endif static DiagnosticHandlerFunction getDiagHandler(DiagnosticHandlerFunction F, LLVMContext &C) { if (F) return F; return [&C](const DiagnosticInfo &DI) { C.diagnose(DI); }; } // HLSL Change Starts static void ReportWarning(DiagnosticHandlerFunction F, const char *Msg) { Twine tmsg(Msg); BitcodeDiagnosticInfo BDI(std::error_code(), DiagnosticSeverity::DS_Warning, tmsg); F(BDI); } // HLSL Change Ends BitcodeReader::BitcodeReader(std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, // HLSL Change: unique_ptr DiagnosticHandlerFunction DiagnosticHandler) : Context(Context), DiagnosticHandler(getDiagHandler(DiagnosticHandler, Context)), Buffer(std::move(Buffer)), ValueList(Context), MDValueList(Context) {} // HLSL Change: std::move BitcodeReader::BitcodeReader(LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler) : Context(Context), DiagnosticHandler(getDiagHandler(DiagnosticHandler, Context)), Buffer(nullptr), ValueList(Context), MDValueList(Context) {} std::error_code BitcodeReader::materializeForwardReferencedFunctions() { if (WillMaterializeAllForwardRefs) return std::error_code(); // Prevent recursion. 
WillMaterializeAllForwardRefs = true; while (!BasicBlockFwdRefQueue.empty()) { Function *F = BasicBlockFwdRefQueue.front(); BasicBlockFwdRefQueue.pop_front(); assert(F && "Expected valid function"); if (!BasicBlockFwdRefs.count(F)) // Already materialized. continue; // Check for a function that isn't materializable to prevent an infinite // loop. When parsing a blockaddress stored in a global variable, there // isn't a trivial way to check if a function will have a body without a // linear search through FunctionsWithBodies, so just check it here. if (!F->isMaterializable()) return error("Never resolved function from blockaddress"); // Try to materialize F. if (std::error_code EC = materialize(F)) return EC; } assert(BasicBlockFwdRefs.empty() && "Function missing from queue"); // Reset state. WillMaterializeAllForwardRefs = false; return std::error_code(); } void BitcodeReader::freeState() { Buffer = nullptr; std::vector<Type*>().swap(TypeList); ValueList.clear(); MDValueList.clear(); std::vector<Comdat *>().swap(ComdatList); std::vector<AttributeSet>().swap(MAttributes); std::vector<BasicBlock*>().swap(FunctionBBs); std::vector<Function*>().swap(FunctionsWithBodies); DeferredFunctionInfo.clear(); DeferredMetadataInfo.clear(); MDKindMap.clear(); assert(BasicBlockFwdRefs.empty() && "Unresolved blockaddress fwd references"); BasicBlockFwdRefQueue.clear(); } //===----------------------------------------------------------------------===// // Helper functions to implement forward reference resolution, etc. //===----------------------------------------------------------------------===// /// Convert a string from a record into an std::string, return true on failure. template <typename StrTy> static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx, StrTy &Result) { if (Idx > Record.size()) return true; for (unsigned i = Idx, e = Record.size(); i != e; ++i) Result += (char)Record[i]; return false; } static bool hasImplicitComdat(size_t Val) { switch (Val) { default: return false; case 1: // Old WeakAnyLinkage case 4: // Old LinkOnceAnyLinkage case 10: // Old WeakODRLinkage case 11: // Old LinkOnceODRLinkage return true; } } static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) { switch (Val) { default: // Map unknown/new linkages to external case 0: return GlobalValue::ExternalLinkage; case 2: return GlobalValue::AppendingLinkage; case 3: return GlobalValue::InternalLinkage; case 5: return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage case 6: return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage case 7: return GlobalValue::ExternalWeakLinkage; case 8: return GlobalValue::CommonLinkage; case 9: return GlobalValue::PrivateLinkage; case 12: return GlobalValue::AvailableExternallyLinkage; case 13: return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage case 14: return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage case 15: return GlobalValue::ExternalLinkage; // Obsolete LinkOnceODRAutoHideLinkage case 1: // Old value with implicit comdat. case 16: return GlobalValue::WeakAnyLinkage; case 10: // Old value with implicit comdat. case 17: return GlobalValue::WeakODRLinkage; case 4: // Old value with implicit comdat. case 18: return GlobalValue::LinkOnceAnyLinkage; case 11: // Old value with implicit comdat. case 19: return GlobalValue::LinkOnceODRLinkage; } } static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) { switch (Val) { default: // Map unknown visibilities to default. 
case 0: return GlobalValue::DefaultVisibility; case 1: return GlobalValue::HiddenVisibility; case 2: return GlobalValue::ProtectedVisibility; } } static GlobalValue::DLLStorageClassTypes getDecodedDLLStorageClass(unsigned Val) { switch (Val) { default: // Map unknown values to default. case 0: return GlobalValue::DefaultStorageClass; case 1: return GlobalValue::DLLImportStorageClass; case 2: return GlobalValue::DLLExportStorageClass; } } static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) { switch (Val) { case 0: return GlobalVariable::NotThreadLocal; default: // Map unknown non-zero value to general dynamic. case 1: return GlobalVariable::GeneralDynamicTLSModel; case 2: return GlobalVariable::LocalDynamicTLSModel; case 3: return GlobalVariable::InitialExecTLSModel; case 4: return GlobalVariable::LocalExecTLSModel; } } static int getDecodedCastOpcode(unsigned Val) { switch (Val) { default: return -1; case bitc::CAST_TRUNC : return Instruction::Trunc; case bitc::CAST_ZEXT : return Instruction::ZExt; case bitc::CAST_SEXT : return Instruction::SExt; case bitc::CAST_FPTOUI : return Instruction::FPToUI; case bitc::CAST_FPTOSI : return Instruction::FPToSI; case bitc::CAST_UITOFP : return Instruction::UIToFP; case bitc::CAST_SITOFP : return Instruction::SIToFP; case bitc::CAST_FPTRUNC : return Instruction::FPTrunc; case bitc::CAST_FPEXT : return Instruction::FPExt; case bitc::CAST_PTRTOINT: return Instruction::PtrToInt; case bitc::CAST_INTTOPTR: return Instruction::IntToPtr; case bitc::CAST_BITCAST : return Instruction::BitCast; case bitc::CAST_ADDRSPACECAST: return Instruction::AddrSpaceCast; } } static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) { bool IsFP = Ty->isFPOrFPVectorTy(); // BinOps are only valid for int/fp or vector of int/fp types if (!IsFP && !Ty->isIntOrIntVectorTy()) return -1; switch (Val) { default: return -1; case bitc::BINOP_ADD: return IsFP ? Instruction::FAdd : Instruction::Add; case bitc::BINOP_SUB: return IsFP ? Instruction::FSub : Instruction::Sub; case bitc::BINOP_MUL: return IsFP ? Instruction::FMul : Instruction::Mul; case bitc::BINOP_UDIV: return IsFP ? -1 : Instruction::UDiv; case bitc::BINOP_SDIV: return IsFP ? Instruction::FDiv : Instruction::SDiv; case bitc::BINOP_UREM: return IsFP ? -1 : Instruction::URem; case bitc::BINOP_SREM: return IsFP ? Instruction::FRem : Instruction::SRem; case bitc::BINOP_SHL: return IsFP ? -1 : Instruction::Shl; case bitc::BINOP_LSHR: return IsFP ? -1 : Instruction::LShr; case bitc::BINOP_ASHR: return IsFP ? -1 : Instruction::AShr; case bitc::BINOP_AND: return IsFP ? -1 : Instruction::And; case bitc::BINOP_OR: return IsFP ? -1 : Instruction::Or; case bitc::BINOP_XOR: return IsFP ? 
-1 : Instruction::Xor; } } static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) { switch (Val) { default: return AtomicRMWInst::BAD_BINOP; case bitc::RMW_XCHG: return AtomicRMWInst::Xchg; case bitc::RMW_ADD: return AtomicRMWInst::Add; case bitc::RMW_SUB: return AtomicRMWInst::Sub; case bitc::RMW_AND: return AtomicRMWInst::And; case bitc::RMW_NAND: return AtomicRMWInst::Nand; case bitc::RMW_OR: return AtomicRMWInst::Or; case bitc::RMW_XOR: return AtomicRMWInst::Xor; case bitc::RMW_MAX: return AtomicRMWInst::Max; case bitc::RMW_MIN: return AtomicRMWInst::Min; case bitc::RMW_UMAX: return AtomicRMWInst::UMax; case bitc::RMW_UMIN: return AtomicRMWInst::UMin; } } static AtomicOrdering getDecodedOrdering(unsigned Val) { switch (Val) { case bitc::ORDERING_NOTATOMIC: return NotAtomic; case bitc::ORDERING_UNORDERED: return Unordered; case bitc::ORDERING_MONOTONIC: return Monotonic; case bitc::ORDERING_ACQUIRE: return Acquire; case bitc::ORDERING_RELEASE: return Release; case bitc::ORDERING_ACQREL: return AcquireRelease; default: // Map unknown orderings to sequentially-consistent. case bitc::ORDERING_SEQCST: return SequentiallyConsistent; } } static SynchronizationScope getDecodedSynchScope(unsigned Val) { switch (Val) { case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread; default: // Map unknown scopes to cross-thread. case bitc::SYNCHSCOPE_CROSSTHREAD: return CrossThread; } } static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) { switch (Val) { default: // Map unknown selection kinds to any. case bitc::COMDAT_SELECTION_KIND_ANY: return Comdat::Any; case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH: return Comdat::ExactMatch; case bitc::COMDAT_SELECTION_KIND_LARGEST: return Comdat::Largest; case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES: return Comdat::NoDuplicates; case bitc::COMDAT_SELECTION_KIND_SAME_SIZE: return Comdat::SameSize; } } static FastMathFlags getDecodedFastMathFlags(unsigned Val) { FastMathFlags FMF; if (0 != (Val & FastMathFlags::UnsafeAlgebra)) FMF.setUnsafeAlgebra(); if (0 != (Val & FastMathFlags::NoNaNs)) FMF.setNoNaNs(); if (0 != (Val & FastMathFlags::NoInfs)) FMF.setNoInfs(); if (0 != (Val & FastMathFlags::NoSignedZeros)) FMF.setNoSignedZeros(); if (0 != (Val & FastMathFlags::AllowReciprocal)) FMF.setAllowReciprocal(); return FMF; } static void upgradeDLLImportExportLinkage(llvm::GlobalValue *GV, unsigned Val) { switch (Val) { case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break; case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break; } } namespace llvm { namespace { /// \brief A class for maintaining the slot number definition /// as a placeholder for the actual definition for forward constants defs. class ConstantPlaceHolder : public ConstantExpr { void operator=(const ConstantPlaceHolder &) = delete; public: // allocate space for exactly one operand void *operator new(size_t s) { return User::operator new(s, 1); } explicit ConstantPlaceHolder(Type *Ty, LLVMContext &Context) : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) { Op<0>() = UndefValue::get(Type::getInt32Ty(Context)); } /// \brief Methods to support type inquiry through isa, cast, and dyn_cast. static bool classof(const Value *V) { return isa<ConstantExpr>(V) && cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1; } /// Provide fast operand accessors DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); }; } // FIXME: can we inherit this from ConstantExpr? 
template <> struct OperandTraits<ConstantPlaceHolder> : public FixedNumOperandTraits<ConstantPlaceHolder, 1> { }; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPlaceHolder, Value) } void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) { if (Idx == size()) { push_back(V); return; } if (Idx >= size()) resize(Idx+1); WeakTrackingVH &OldV = ValuePtrs[Idx]; if (!OldV) { OldV = V; return; } // Handle constants and non-constants (e.g. instrs) differently for // efficiency. if (Constant *PHC = dyn_cast<Constant>(&*OldV)) { ResolveConstants.push_back(std::make_pair(PHC, Idx)); OldV = V; } else { // If there was a forward reference to this value, replace it. Value *PrevVal = OldV; OldV->replaceAllUsesWith(V); delete PrevVal; } } Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx, Type *Ty) { if (Idx >= size()) resize(Idx + 1); if (Value *V = ValuePtrs[Idx]) { if (Ty != V->getType()) report_fatal_error("Type mismatch in constant table!"); return cast<Constant>(V); } // Create and return a placeholder, which will later be RAUW'd. Constant *C = new ConstantPlaceHolder(Ty, Context); ValuePtrs[Idx] = C; return C; } Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) { // Bail out for a clearly invalid value. This would make us call resize(0) if (Idx == UINT_MAX) return nullptr; if (Idx >= size()) resize(Idx + 1); if (Value *V = ValuePtrs[Idx]) { // If the types don't match, it's invalid. if (Ty && Ty != V->getType()) return nullptr; return V; } // No type specified, must be invalid reference. if (!Ty) return nullptr; // Create and return a placeholder, which will later be RAUW'd. Value *V = new Argument(Ty); ValuePtrs[Idx] = V; return V; } /// Once all constants are read, this method bulk resolves any forward /// references. The idea behind this is that we sometimes get constants (such /// as large arrays) which reference *many* forward ref constants. Replacing /// each of these causes a lot of thrashing when building/reuniquing the /// constant. Instead of doing this, we look at all the uses and rewrite all /// the place holders at once for any constant that uses a placeholder. void BitcodeReaderValueList::resolveConstantForwardRefs() { // Sort the values by-pointer so that they are efficient to look up with a // binary search. std::sort(ResolveConstants.begin(), ResolveConstants.end()); SmallVector<Constant*, 64> NewOps; while (!ResolveConstants.empty()) { Value *RealVal = operator[](ResolveConstants.back().second); Constant *Placeholder = ResolveConstants.back().first; ResolveConstants.pop_back(); // Loop over all users of the placeholder, updating them to reference the // new value. If they reference more than one placeholder, update them all // at once. while (!Placeholder->use_empty()) { auto UI = Placeholder->user_begin(); User *U = *UI; // If the using object isn't uniqued, just update the operands. This // handles instructions and initializers for global variables. if (!isa<Constant>(U) || isa<GlobalValue>(U)) { UI.getUse().set(RealVal); continue; } // Otherwise, we have a constant that uses the placeholder. Replace that // constant with a new constant that has *all* placeholder uses updated. Constant *UserC = cast<Constant>(U); for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end(); I != E; ++I) { Value *NewOp; if (!isa<ConstantPlaceHolder>(*I)) { // Not a placeholder reference. NewOp = *I; } else if (*I == Placeholder) { // Common case is that it just references this one placeholder. 
NewOp = RealVal; } else { // Otherwise, look up the placeholder in ResolveConstants. ResolveConstantsTy::iterator It = std::lower_bound(ResolveConstants.begin(), ResolveConstants.end(), std::pair<Constant*, unsigned>(cast<Constant>(*I), 0)); assert(It != ResolveConstants.end() && It->first == *I); NewOp = operator[](It->second); } NewOps.push_back(cast<Constant>(NewOp)); } // Make the new constant. Constant *NewC; if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) { NewC = ConstantArray::get(UserCA->getType(), NewOps); } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) { NewC = ConstantStruct::get(UserCS->getType(), NewOps); } else if (isa<ConstantVector>(UserC)) { NewC = ConstantVector::get(NewOps); } else { assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr."); NewC = cast<ConstantExpr>(UserC)->getWithOperands(NewOps); } UserC->replaceAllUsesWith(NewC); UserC->destroyConstant(); NewOps.clear(); } // Update all ValueHandles, they should be the only users at this point. Placeholder->replaceAllUsesWith(RealVal); delete Placeholder; } } void BitcodeReaderMDValueList::assignValue(Metadata *MD, unsigned Idx) { if (Idx == size()) { push_back(MD); return; } if (Idx >= size()) resize(Idx+1); TrackingMDRef &OldMD = MDValuePtrs[Idx]; if (!OldMD) { OldMD.reset(MD); return; } // If there was a forward reference to this value, replace it. TempMDTuple PrevMD(cast<MDTuple>(OldMD.get())); PrevMD->replaceAllUsesWith(MD); --NumFwdRefs; } Metadata *BitcodeReaderMDValueList::getValueFwdRef(unsigned Idx) { if (Idx >= size()) resize(Idx + 1); if (Metadata *MD = MDValuePtrs[Idx]) return MD; // Track forward refs to be resolved later. if (AnyFwdRefs) { MinFwdRef = std::min(MinFwdRef, Idx); MaxFwdRef = std::max(MaxFwdRef, Idx); } else { AnyFwdRefs = true; MinFwdRef = MaxFwdRef = Idx; } ++NumFwdRefs; // Create and return a placeholder, which will later be RAUW'd. Metadata *MD = MDNode::getTemporary(Context, None).release(); MDValuePtrs[Idx].reset(MD); return MD; } void BitcodeReaderMDValueList::tryToResolveCycles() { if (!AnyFwdRefs) // Nothing to do. return; if (NumFwdRefs) // Still forward references... can't resolve cycles. return; // Resolve any cycles. for (unsigned I = MinFwdRef, E = MaxFwdRef + 1; I != E; ++I) { auto &MD = MDValuePtrs[I]; auto *N = dyn_cast_or_null<MDNode>(MD); if (!N) continue; assert(!N->isTemporary() && "Unexpected forward reference"); N->resolveCycles(); } // Make sure we return early again until there's another forward ref. AnyFwdRefs = false; } Type *BitcodeReader::getTypeByID(unsigned ID) { // The type table size is always specified correctly. if (ID >= TypeList.size()) return nullptr; if (Type *Ty = TypeList[ID]) return Ty; // If we have a forward reference, the only possible case is when it is to a // named struct. Just create a placeholder for now. 
return TypeList[ID] = createIdentifiedStructType(Context); } StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context, StringRef Name) { auto *Ret = StructType::create(Context, Name); IdentifiedStructTypes.push_back(Ret); return Ret; } StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context) { auto *Ret = StructType::create(Context); IdentifiedStructTypes.push_back(Ret); return Ret; } //===----------------------------------------------------------------------===// // Functions for parsing blocks from the bitcode file //===----------------------------------------------------------------------===// /// \brief This fills an AttrBuilder object with the LLVM attributes that have /// been decoded from the given integer. This function must stay in sync with /// 'encodeLLVMAttributesForBitcode'. static void decodeLLVMAttributesForBitcode(AttrBuilder &B, uint64_t EncodedAttrs) { // FIXME: Remove in 4.0. // The alignment is stored as a 16-bit raw value from bits 31--16. We shift // the bits above 31 down by 11 bits. unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16; assert((!Alignment || isPowerOf2_32(Alignment)) && "Alignment must be a power of two."); if (Alignment) B.addAlignmentAttr(Alignment); B.addRawValue(((EncodedAttrs & (0xfffffULL << 32)) >> 11) | (EncodedAttrs & 0xffff)); } std::error_code BitcodeReader::parseAttributeBlock() { if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID)) return error("Invalid record"); if (!MAttributes.empty()) return error("Invalid multiple blocks"); SmallVector<uint64_t, 64> Record; SmallVector<AttributeSet, 8> Attrs; // Read all the records. while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a record. Record.clear(); switch (Stream.readRecord(Entry.ID, Record)) { default: // Default behavior: ignore. break; case bitc::PARAMATTR_CODE_ENTRY_OLD: { // ENTRY: [paramidx0, attr0, ...] // FIXME: Remove in 4.0. if (Record.size() & 1) return error("Invalid record"); for (unsigned i = 0, e = Record.size(); i != e; i += 2) { AttrBuilder B; decodeLLVMAttributesForBitcode(B, Record[i+1]); Attrs.push_back(AttributeSet::get(Context, Record[i], B)); } MAttributes.push_back(AttributeSet::get(Context, Attrs)); Attrs.clear(); break; } case bitc::PARAMATTR_CODE_ENTRY: { // ENTRY: [attrgrp0, attrgrp1, ...] for (unsigned i = 0, e = Record.size(); i != e; ++i) Attrs.push_back(MAttributeGroups[Record[i]]); MAttributes.push_back(AttributeSet::get(Context, Attrs)); Attrs.clear(); break; } } } } // Returns Attribute::None on unrecognized codes. 
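// parseAttrKind() below turns Attribute::None into an explicit
// "Unknown attribute kind" error rather than silently dropping the record.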
static Attribute::AttrKind getAttrFromCode(uint64_t Code) { switch (Code) { default: return Attribute::None; case bitc::ATTR_KIND_ALIGNMENT: return Attribute::Alignment; case bitc::ATTR_KIND_ALWAYS_INLINE: return Attribute::AlwaysInline; case bitc::ATTR_KIND_ARGMEMONLY: return Attribute::ArgMemOnly; case bitc::ATTR_KIND_BUILTIN: return Attribute::Builtin; case bitc::ATTR_KIND_BY_VAL: return Attribute::ByVal; case bitc::ATTR_KIND_IN_ALLOCA: return Attribute::InAlloca; case bitc::ATTR_KIND_COLD: return Attribute::Cold; case bitc::ATTR_KIND_CONVERGENT: return Attribute::Convergent; case bitc::ATTR_KIND_INLINE_HINT: return Attribute::InlineHint; case bitc::ATTR_KIND_IN_REG: return Attribute::InReg; case bitc::ATTR_KIND_JUMP_TABLE: return Attribute::JumpTable; case bitc::ATTR_KIND_MIN_SIZE: return Attribute::MinSize; case bitc::ATTR_KIND_NAKED: return Attribute::Naked; case bitc::ATTR_KIND_NEST: return Attribute::Nest; case bitc::ATTR_KIND_NO_ALIAS: return Attribute::NoAlias; case bitc::ATTR_KIND_NO_BUILTIN: return Attribute::NoBuiltin; case bitc::ATTR_KIND_NO_CAPTURE: return Attribute::NoCapture; case bitc::ATTR_KIND_NO_DUPLICATE: return Attribute::NoDuplicate; case bitc::ATTR_KIND_NO_IMPLICIT_FLOAT: return Attribute::NoImplicitFloat; case bitc::ATTR_KIND_NO_INLINE: return Attribute::NoInline; case bitc::ATTR_KIND_NON_LAZY_BIND: return Attribute::NonLazyBind; case bitc::ATTR_KIND_NON_NULL: return Attribute::NonNull; case bitc::ATTR_KIND_DEREFERENCEABLE: return Attribute::Dereferenceable; case bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL: return Attribute::DereferenceableOrNull; case bitc::ATTR_KIND_NO_RED_ZONE: return Attribute::NoRedZone; case bitc::ATTR_KIND_NO_RETURN: return Attribute::NoReturn; case bitc::ATTR_KIND_NO_UNWIND: return Attribute::NoUnwind; case bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE: return Attribute::OptimizeForSize; case bitc::ATTR_KIND_OPTIMIZE_NONE: return Attribute::OptimizeNone; case bitc::ATTR_KIND_READ_NONE: return Attribute::ReadNone; case bitc::ATTR_KIND_READ_ONLY: return Attribute::ReadOnly; case bitc::ATTR_KIND_RETURNED: return Attribute::Returned; case bitc::ATTR_KIND_RETURNS_TWICE: return Attribute::ReturnsTwice; case bitc::ATTR_KIND_S_EXT: return Attribute::SExt; case bitc::ATTR_KIND_STACK_ALIGNMENT: return Attribute::StackAlignment; case bitc::ATTR_KIND_STACK_PROTECT: return Attribute::StackProtect; case bitc::ATTR_KIND_STACK_PROTECT_REQ: return Attribute::StackProtectReq; case bitc::ATTR_KIND_STACK_PROTECT_STRONG: return Attribute::StackProtectStrong; case bitc::ATTR_KIND_SAFESTACK: return Attribute::SafeStack; case bitc::ATTR_KIND_STRUCT_RET: return Attribute::StructRet; case bitc::ATTR_KIND_SANITIZE_ADDRESS: return Attribute::SanitizeAddress; case bitc::ATTR_KIND_SANITIZE_THREAD: return Attribute::SanitizeThread; case bitc::ATTR_KIND_SANITIZE_MEMORY: return Attribute::SanitizeMemory; case bitc::ATTR_KIND_UW_TABLE: return Attribute::UWTable; case bitc::ATTR_KIND_Z_EXT: return Attribute::ZExt; } } std::error_code BitcodeReader::parseAlignmentValue(uint64_t Exponent, unsigned &Alignment) { // Note: Alignment in bitcode files is incremented by 1, so that zero // can be used for default alignment. 
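  // For example, a stored Exponent of 0 decodes to 0 ("use the default
  // alignment"), while an Exponent of 4 decodes to (1 << 4) >> 1 == 8 bytes.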
if (Exponent > Value::MaxAlignmentExponent + 1) return error("Invalid alignment value"); Alignment = (1 << static_cast<unsigned>(Exponent)) >> 1; return std::error_code(); } std::error_code BitcodeReader::parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind) { *Kind = getAttrFromCode(Code); if (*Kind == Attribute::None) return error(BitcodeError::CorruptedBitcode, "Unknown attribute kind (" + Twine(Code) + ")"); return std::error_code(); } std::error_code BitcodeReader::parseAttributeGroupBlock() { if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID)) return error("Invalid record"); if (!MAttributeGroups.empty()) return error("Invalid multiple blocks"); SmallVector<uint64_t, 64> Record; // Read all the records. while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a record. Record.clear(); switch (Stream.readRecord(Entry.ID, Record)) { default: // Default behavior: ignore. break; case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...] if (Record.size() < 3) return error("Invalid record"); uint64_t GrpID = Record[0]; uint64_t Idx = Record[1]; // Index of the object this attribute refers to. AttrBuilder B; for (unsigned i = 2, e = Record.size(); i != e; ++i) { if (Record[i] == 0) { // Enum attribute Attribute::AttrKind Kind; if (std::error_code EC = parseAttrKind(Record[++i], &Kind)) return EC; B.addAttribute(Kind); } else if (Record[i] == 1) { // Integer attribute Attribute::AttrKind Kind; if (std::error_code EC = parseAttrKind(Record[++i], &Kind)) return EC; if (Kind == Attribute::Alignment) B.addAlignmentAttr(Record[++i]); else if (Kind == Attribute::StackAlignment) B.addStackAlignmentAttr(Record[++i]); else if (Kind == Attribute::Dereferenceable) B.addDereferenceableAttr(Record[++i]); else if (Kind == Attribute::DereferenceableOrNull) B.addDereferenceableOrNullAttr(Record[++i]); } else { // String attribute assert((Record[i] == 3 || Record[i] == 4) && "Invalid attribute group entry"); bool HasValue = (Record[i++] == 4); SmallString<64> KindStr; SmallString<64> ValStr; while (Record[i] != 0 && i != e) KindStr += Record[i++]; assert(Record[i] == 0 && "Kind string not null terminated"); if (HasValue) { // Has a value associated with it. ++i; // Skip the '0' that terminates the "kind" string. while (Record[i] != 0 && i != e) ValStr += Record[i++]; assert(Record[i] == 0 && "Value string not null terminated"); } B.addAttribute(KindStr.str(), ValStr.str()); } } MAttributeGroups[GrpID] = AttributeSet::get(Context, Idx, B); break; } } } } std::error_code BitcodeReader::parseTypeTable() { if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW)) return error("Invalid record"); return parseTypeTableBody(); } std::error_code BitcodeReader::parseTypeTableBody() { if (!TypeList.empty()) return error("Invalid multiple blocks"); SmallVector<uint64_t, 64> Record; unsigned NumRecords = 0; SmallString<64> TypeName; // Read all the records for this type table. 
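  // The table normally opens with a TYPE_CODE_NUMENTRY record so TypeList can
  // be sized up front, followed by one record per type, e.g.
  //   [NUMENTRY, 3] [INTEGER, 32] [POINTER, 0] [ARRAY, 8, 0]
  // Entries are filled in order; only named structs may legitimately be
  // forward-referenced (see getTypeByID above).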
while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: if (NumRecords != TypeList.size()) return error("Malformed block"); return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a record. Record.clear(); Type *ResultTy = nullptr; switch (Stream.readRecord(Entry.ID, Record)) { default: return error("Invalid value"); case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries] // TYPE_CODE_NUMENTRY contains a count of the number of types in the // type list. This allows us to reserve space. if (Record.size() < 1) return error("Invalid record"); TypeList.resize(Record[0]); continue; case bitc::TYPE_CODE_VOID: // VOID ResultTy = Type::getVoidTy(Context); break; case bitc::TYPE_CODE_HALF: // HALF ResultTy = Type::getHalfTy(Context); break; case bitc::TYPE_CODE_FLOAT: // FLOAT ResultTy = Type::getFloatTy(Context); break; case bitc::TYPE_CODE_DOUBLE: // DOUBLE ResultTy = Type::getDoubleTy(Context); break; case bitc::TYPE_CODE_X86_FP80: // X86_FP80 ResultTy = Type::getX86_FP80Ty(Context); break; case bitc::TYPE_CODE_FP128: // FP128 ResultTy = Type::getFP128Ty(Context); break; case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128 ResultTy = Type::getPPC_FP128Ty(Context); break; case bitc::TYPE_CODE_LABEL: // LABEL ResultTy = Type::getLabelTy(Context); break; case bitc::TYPE_CODE_METADATA: // METADATA ResultTy = Type::getMetadataTy(Context); break; case bitc::TYPE_CODE_X86_MMX: // X86_MMX ResultTy = Type::getX86_MMXTy(Context); break; case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width] if (Record.size() < 1) return error("Invalid record"); uint64_t NumBits = Record[0]; if (NumBits < IntegerType::MIN_INT_BITS || NumBits > IntegerType::MAX_INT_BITS) return error("Bitwidth for integer type out of range"); ResultTy = IntegerType::get(Context, NumBits); break; } case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or // [pointee type, address space] if (Record.size() < 1) return error("Invalid record"); unsigned AddressSpace = 0; if (Record.size() == 2) AddressSpace = Record[1]; ResultTy = getTypeByID(Record[0]); if (!ResultTy || !PointerType::isValidElementType(ResultTy)) return error("Invalid type"); ResultTy = PointerType::get(ResultTy, AddressSpace); break; } case bitc::TYPE_CODE_FUNCTION_OLD: { // FIXME: attrid is dead, remove it in LLVM 4.0 // FUNCTION: [vararg, attrid, retty, paramty x N] if (Record.size() < 3) return error("Invalid record"); SmallVector<Type*, 8> ArgTys; for (unsigned i = 3, e = Record.size(); i != e; ++i) { if (Type *T = getTypeByID(Record[i])) ArgTys.push_back(T); else break; } ResultTy = getTypeByID(Record[2]); if (!ResultTy || ArgTys.size() < Record.size()-3) return error("Invalid type"); ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]); break; } case bitc::TYPE_CODE_FUNCTION: { // FUNCTION: [vararg, retty, paramty x N] if (Record.size() < 2) return error("Invalid record"); SmallVector<Type*, 8> ArgTys; for (unsigned i = 2, e = Record.size(); i != e; ++i) { if (Type *T = getTypeByID(Record[i])) { if (!FunctionType::isValidArgumentType(T)) return error("Invalid function argument type"); ArgTys.push_back(T); } else break; } ResultTy = getTypeByID(Record[1]); if 
(!ResultTy || ArgTys.size() < Record.size()-2) return error("Invalid type"); ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]); break; } case bitc::TYPE_CODE_STRUCT_ANON: { // STRUCT: [ispacked, eltty x N] if (Record.size() < 1) return error("Invalid record"); SmallVector<Type*, 8> EltTys; for (unsigned i = 1, e = Record.size(); i != e; ++i) { if (Type *T = getTypeByID(Record[i])) EltTys.push_back(T); else break; } if (EltTys.size() != Record.size()-1) return error("Invalid type"); ResultTy = StructType::get(Context, EltTys, Record[0]); break; } case bitc::TYPE_CODE_STRUCT_NAME: // STRUCT_NAME: [strchr x N] if (convertToString(Record, 0, TypeName)) return error("Invalid record"); continue; case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N] if (Record.size() < 1) return error("Invalid record"); if (NumRecords >= TypeList.size()) return error("Invalid TYPE table"); // Check to see if this was forward referenced, if so fill in the temp. StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]); if (Res) { Res->setName(TypeName); TypeList[NumRecords] = nullptr; } else // Otherwise, create a new struct. Res = createIdentifiedStructType(Context, TypeName); // HLSL Change Begin - avoid name collision for dxil types. bool bNameCollision = Res->getName().size() > TypeName.size(); //TypeName.clear(); // HLSL Change End. SmallVector<Type*, 8> EltTys; for (unsigned i = 1, e = Record.size(); i != e; ++i) { if (Type *T = getTypeByID(Record[i])) EltTys.push_back(T); else break; } if (EltTys.size() != Record.size()-1) return error("Invalid record"); Res->setBody(EltTys, Record[0]); // HLSL Change Begin - avoid name collision for dxil types. if (bNameCollision) { StructType *otherType = TheModule->getTypeByName(TypeName); if (otherType->isLayoutIdentical(Res)) { Res = otherType; } } TypeName.clear(); // HLSL Change End. ResultTy = Res; break; } case bitc::TYPE_CODE_OPAQUE: { // OPAQUE: [] if (Record.size() != 1) return error("Invalid record"); if (NumRecords >= TypeList.size()) return error("Invalid TYPE table"); // Check to see if this was forward referenced, if so fill in the temp. StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]); if (Res) { Res->setName(TypeName); TypeList[NumRecords] = nullptr; } else // Otherwise, create a new struct with no body. 
Res = createIdentifiedStructType(Context, TypeName); TypeName.clear(); ResultTy = Res; break; } case bitc::TYPE_CODE_ARRAY: // ARRAY: [numelts, eltty] if (Record.size() < 2) return error("Invalid record"); ResultTy = getTypeByID(Record[1]); if (!ResultTy || !ArrayType::isValidElementType(ResultTy)) return error("Invalid type"); ResultTy = ArrayType::get(ResultTy, Record[0]); break; case bitc::TYPE_CODE_VECTOR: // VECTOR: [numelts, eltty] if (Record.size() < 2) return error("Invalid record"); if (Record[0] == 0) return error("Invalid vector length"); ResultTy = getTypeByID(Record[1]); if (!ResultTy || !StructType::isValidElementType(ResultTy)) return error("Invalid type"); ResultTy = VectorType::get(ResultTy, Record[0]); break; } if (NumRecords >= TypeList.size()) return error("Invalid TYPE table"); if (TypeList[NumRecords]) return error( "Invalid TYPE table: Only named structs can be forward referenced"); assert(ResultTy && "Didn't read a type?"); TypeList[NumRecords++] = ResultTy; } } std::error_code BitcodeReader::parseValueSymbolTable() { if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) return error("Invalid record"); SmallVector<uint64_t, 64> Record; Triple TT(TheModule->getTargetTriple()); // Read all the records for this value table. SmallString<128> ValueName; while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a record. Record.clear(); switch (Stream.readRecord(Entry.ID, Record)) { default: // Default behavior: unknown type. break; case bitc::VST_CODE_ENTRY: { // VST_ENTRY: [valueid, namechar x N] if (convertToString(Record, 1, ValueName)) return error("Invalid record"); unsigned ValueID = Record[0]; if (ValueID >= ValueList.size() || !ValueList[ValueID]) return error("Invalid record"); Value *V = ValueList[ValueID]; V->setName(StringRef(ValueName.data(), ValueName.size())); if (auto *GO = dyn_cast<GlobalObject>(V)) { if (GO->getComdat() == reinterpret_cast<Comdat *>(1)) { if (TT.isOSBinFormatMachO()) GO->setComdat(nullptr); else GO->setComdat(TheModule->getOrInsertComdat(V->getName())); } } ValueName.clear(); break; } case bitc::VST_CODE_BBENTRY: { if (convertToString(Record, 1, ValueName)) return error("Invalid record"); BasicBlock *BB = getBasicBlock(Record[0]); if (!BB) return error("Invalid record"); BB->setName(StringRef(ValueName.data(), ValueName.size())); ValueName.clear(); break; } } } } static int64_t unrotateSign(uint64_t U) { return U & 1 ? ~(U >> 1) : U >> 1; } // HLSL Change - Begin // This function takes a list of strings that corresponds to the list of named // metadata that we want to materialize, and materialize them efficiently. // // Note: This function will only materialize metadata that are the following // types: // // MDString e.g. !"my metadata string" // MDNode e.g. !10 = !{ !"my node", !32, !48 } // distinct MDNode e.g. !10 = distinct !{ !"my node", !32, !48 } // ValueAsMetadata e.g. true, 0, 10 // // Everything else will appear as !<temporary> // // We first skip through the whole METADATA_BLOCK_ID block. 
As we do, we take // note of the named metadata we want, and push their operands into a queue. // We also record the bit offsets where all String, Node, and Value metadata. // // Next, we go through the queue, and skip to their bit offsets and load their // data (but only if they're the types listed above). If the metadata has their // own operands, we insert them into the queue as well. // // std::error_code BitcodeReader::parseSelectNamedMetadata(ArrayRef<StringRef> NamedMetadata) { // Remember our bit position right at the start, because we're going to // jump back to it later. uint64_t OriginalBitPos = Stream.GetCurrentBitNo(); // Buffer used to read record operands. SmallVector<uint64_t, 64> Record; SmallVector<uint8_t, 32> Uint8Record; // A map that we use to remember where we saw each value number struct Info { uint64_t BitPos; uint64_t ID; bool IsString; }; std::vector<Info> NodePositions; unsigned NextMDValueNo = MDValueList.size(); if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID)) return error("Invalid record"); SmallVector<uint64_t, 1> AbbrevDefines; std::vector<uint64_t> NodeQueue; std::unordered_set<uint64_t> NodeQueueSet; auto add_to_queue = [&NodeQueueSet, &NodeQueue](uint64_t Val) { if (NodeQueueSet.insert(Val).second) NodeQueue.push_back(Val); }; // Read all the records. while (1) { // If we encounter a DEFINE_ABBREV record, record where it is. // We need to go back to them to recover the abbreviation list. // There shouldn't be more than one... but just in case. if (Stream.PeekCode() == bitc::DEFINE_ABBREV) { AbbrevDefines.push_back(Stream.GetCurrentBitNo()); } // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End bool Stop = false; switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: MDValueList.tryToResolveCycles(); Stop = true; break; case BitstreamEntry::Record: // The interesting case. break; } if (Stop) break; // Peek the record type without changing bit-position and loading // the record's operands. unsigned PeekCode = Stream.peekRecord(Entry.ID); // For the first pass, we're only interested in named metadata. if (PeekCode != bitc::METADATA_NAME) { // If it's one of these types of things, remember where we actually // found it. switch (PeekCode) { case bitc::METADATA_DISTINCT_NODE: case bitc::METADATA_NODE: case bitc::METADATA_STRING: case bitc::METADATA_VALUE: auto old_size = NodePositions.size(); NodePositions.resize(NextMDValueNo + 1); memset(NodePositions.data() + old_size, 0, sizeof(NodePositions[0]) * (NextMDValueNo + 1 - old_size)); NodePositions[NextMDValueNo] = { Stream.GetCurrentBitNo(), Entry.ID, PeekCode == bitc::METADATA_STRING }; break; } // Skip the record without loading anything. Stream.skipRecord(Entry.ID); NextMDValueNo++; continue; } // Read a record. Record.clear(); unsigned Code = Stream.readRecord(Entry.ID, Record); // Read name of the named metadata. SmallString<8> Name(Record.begin(), Record.end()); Record.clear(); Code = Stream.ReadCode(); // Figure out if it's one of the named metadata that we actually want. bool found = false; for (unsigned i = 0; i < NamedMetadata.size(); i++) { if (Name == NamedMetadata[i]) { found = true; break; } } // If it's not interesting to us, then just skip. 
if (!found) { Stream.skipRecord(Code); } else { unsigned NextBitCode = Stream.readRecord(Code, Record); if (NextBitCode != bitc::METADATA_NAMED_NODE) return error("METADATA_NAME not followed by METADATA_NAMED_NODE"); // Read named metadata elements. unsigned Size = Record.size(); NamedMDNode *NMD = TheModule->getOrInsertNamedMetadata(Name); for (unsigned i = 0; i != Size; ++i) { MDNode *MD = dyn_cast_or_null<MDNode>(MDValueList.getValueFwdRef(Record[i])); if (!MD) return error("Invalid record"); add_to_queue(Record[i]); // Add this MD number to our queue, so we know to try to read it later. NMD->addOperand(MD); } } } // Now that we have gathered all the metadata operands that the named // metadata need... // Go back to the beginning. Stream.JumpToBit(OriginalBitPos); // Re-enter the metadata block. if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID)) return error("Invalid record"); // Load all the abbreviations's again, since exiting the block and re-entering // the block has wiped them clean. for (unsigned i = 0; i < AbbrevDefines.size(); i++) { Stream.JumpToBit(AbbrevDefines[i]); while (1) { unsigned Code = Stream.ReadCode(); if (Code == bitc::DEFINE_ABBREV) { Stream.ReadAbbrevRecord(); continue; } else { break; } } } std::string String; // String buffer used to read MD string // Go through the queue and read all the metadata we want. for (unsigned i = 0; i < NodeQueue.size(); i++) { uint64_t MDNumber = NodeQueue[i]; // If we never memorized the location for this MD No, it means // it wasn't one of the MD types that we care about. if (MDNumber >= NodePositions.size()) continue; Info I = NodePositions[MDNumber]; if (I.BitPos == 0) continue; // Go back to the bit where we read the record Stream.JumpToBit(I.BitPos); // Read a record Record.clear(); unsigned Code = 0; // If it's a string, use our special Uint8Buffer to speed up the reading. if (I.IsString) { Uint8Record.clear(); Code = Stream.readRecord(I.ID, Record, nullptr, &Uint8Record); assert(!Uint8Record.empty() || (Record.empty() && Uint8Record.empty())); } else { Code = Stream.readRecord(I.ID, Record); } // Read the actual data. This code is largely copied from parseMetadata bool IsDistinct = false; switch (Code) { default: llvm_unreachable("Can't actually be anything else."); break; case bitc::METADATA_VALUE: { if (Record.size() != 2) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (Ty->isMetadataTy() || Ty->isVoidTy()) return error("Invalid record"); MDValueList.assignValue( ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)), MDNumber); break; } case bitc::METADATA_DISTINCT_NODE: IsDistinct = true; LLVM_FALLTHROUGH; // HLSL Change case bitc::METADATA_NODE: { SmallVector<Metadata *, 8> Elts; Elts.reserve(Record.size()); for (unsigned ID : Record) { Elts.push_back(ID ? MDValueList.getValueFwdRef(ID - 1) : nullptr); // If this ID is not a null MD, add to queue. if (ID) add_to_queue(ID - 1); } MDValueList.assignValue(IsDistinct ? MDNode::getDistinct(Context, Elts) : MDNode::get(Context, Elts), MDNumber); break; } case bitc::METADATA_STRING: { String.clear(); String.resize(Uint8Record.size()); memcpy(&String[0], Uint8Record.data(), Uint8Record.size()); llvm::UpgradeMDStringConstant(String); Metadata *MD = MDString::get(Context, String); MDValueList.assignValue(MD, MDNumber); break; } } } return std::error_code(); } std::error_code BitcodeReader::materializeSelectNamedMetadata(ArrayRef<StringRef> NamedMetadata) { for (uint64_t BitPos : DeferredMetadataInfo) { // Move the bit stream to the saved position. 
Stream.JumpToBit(BitPos); if (std::error_code EC = parseSelectNamedMetadata(NamedMetadata)) return EC; } DeferredMetadataInfo.clear(); return std::error_code(); } // HLSL Change - end std::error_code BitcodeReader::parseMetadata() { IsMetadataMaterialized = true; unsigned NextMDValueNo = MDValueList.size(); if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID)) return error("Invalid record"); SmallVector<uint64_t, 64> Record; SmallVector<uint8_t, 64> Uint8Record; // HLSL Change auto getMD = [&](unsigned ID) -> Metadata *{ return MDValueList.getValueFwdRef(ID); }; auto getMDOrNull = [&](unsigned ID) -> Metadata *{ if (ID) return getMD(ID - 1); return nullptr; }; auto getMDString = [&](unsigned ID) -> MDString *{ // This requires that the ID is not really a forward reference. In // particular, the MDString must already have been resolved. return cast_or_null<MDString>(getMDOrNull(ID)); }; #define GET_OR_DISTINCT(CLASS, DISTINCT, ARGS) \ (DISTINCT ? CLASS::getDistinct ARGS : CLASS::get ARGS) // Read all the records. while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: MDValueList.tryToResolveCycles(); return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } #if 1 // HLSL Change // If it's a string metadata, use our special Uint8Record to speed // up reading. unsigned PeekCode = Stream.peekRecord(Entry.ID); unsigned Code = 0; Record.clear(); if (PeekCode == bitc::METADATA_STRING) { Uint8Record.clear(); Code = Stream.readRecord(Entry.ID, Record, nullptr, &Uint8Record); assert(!Uint8Record.empty() || (Record.empty() && Uint8Record.empty())); } else { Code = Stream.readRecord(Entry.ID, Record); } #else // HLSL Change // Read a record. Record.clear(); unsigned Code = Stream.readRecord(Entry.ID, Record); #endif // HLSL Change std::string String; // HLSL Change - Reuse buffer for loading string. bool IsDistinct = false; switch (Code) { default: // Default behavior: ignore. break; case bitc::METADATA_NAME: { // Read name of the named metadata. SmallString<8> Name(Record.begin(), Record.end()); Record.clear(); Code = Stream.ReadCode(); unsigned NextBitCode = Stream.readRecord(Code, Record); if (NextBitCode != bitc::METADATA_NAMED_NODE) return error("METADATA_NAME not followed by METADATA_NAMED_NODE"); // Read named metadata elements. unsigned Size = Record.size(); NamedMDNode *NMD = TheModule->getOrInsertNamedMetadata(Name); for (unsigned i = 0; i != Size; ++i) { MDNode *MD = dyn_cast_or_null<MDNode>(MDValueList.getValueFwdRef(Record[i])); if (!MD) return error("Invalid record"); NMD->addOperand(MD); } break; } case bitc::METADATA_OLD_FN_NODE: { // FIXME: Remove in 4.0. // This is a LocalAsMetadata record, the only type of function-local // metadata. if (Record.size() % 2 == 1) return error("Invalid record"); // If this isn't a LocalAsMetadata record, we're dropping it. This used // to be legal, but there's no upgrade path. 
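      // Dropping still assigns an empty MDNode so later records keep their
      // expected metadata value numbers.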
auto dropRecord = [&] { MDValueList.assignValue(MDNode::get(Context, None), NextMDValueNo++); }; if (Record.size() != 2) { dropRecord(); break; } Type *Ty = getTypeByID(Record[0]); if (Ty->isMetadataTy() || Ty->isVoidTy()) { dropRecord(); break; } MDValueList.assignValue( LocalAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)), NextMDValueNo++); break; } case bitc::METADATA_OLD_NODE: { // FIXME: Remove in 4.0. if (Record.size() % 2 == 1) return error("Invalid record"); unsigned Size = Record.size(); SmallVector<Metadata *, 8> Elts; for (unsigned i = 0; i != Size; i += 2) { Type *Ty = getTypeByID(Record[i]); if (!Ty) return error("Invalid record"); if (Ty->isMetadataTy()) Elts.push_back(MDValueList.getValueFwdRef(Record[i+1])); else if (!Ty->isVoidTy()) { auto *MD = ValueAsMetadata::get(ValueList.getValueFwdRef(Record[i + 1], Ty)); assert(isa<ConstantAsMetadata>(MD) && "Expected non-function-local metadata"); Elts.push_back(MD); } else Elts.push_back(nullptr); } MDValueList.assignValue(MDNode::get(Context, Elts), NextMDValueNo++); break; } case bitc::METADATA_VALUE: { if (Record.size() != 2) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (Ty->isMetadataTy() || Ty->isVoidTy()) return error("Invalid record"); MDValueList.assignValue( ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)), NextMDValueNo++); break; } case bitc::METADATA_DISTINCT_NODE: IsDistinct = true; LLVM_FALLTHROUGH; // HLSL Change case bitc::METADATA_NODE: { SmallVector<Metadata *, 8> Elts; Elts.reserve(Record.size()); for (unsigned ID : Record) Elts.push_back(ID ? MDValueList.getValueFwdRef(ID - 1) : nullptr); MDValueList.assignValue(IsDistinct ? MDNode::getDistinct(Context, Elts) : MDNode::get(Context, Elts), NextMDValueNo++); break; } case bitc::METADATA_LOCATION: { if (Record.size() != 5) return error("Invalid record"); unsigned Line = Record[1]; unsigned Column = Record[2]; MDNode *Scope = cast<MDNode>(MDValueList.getValueFwdRef(Record[3])); Metadata *InlinedAt = Record[4] ? MDValueList.getValueFwdRef(Record[4] - 1) : nullptr; MDValueList.assignValue( GET_OR_DISTINCT(DILocation, Record[0], (Context, Line, Column, Scope, InlinedAt)), NextMDValueNo++); break; } case bitc::METADATA_GENERIC_DEBUG: { if (Record.size() < 4) return error("Invalid record"); unsigned Tag = Record[1]; unsigned Version = Record[2]; if (Tag >= 1u << 16 || Version != 0) return error("Invalid record"); auto *Header = getMDString(Record[3]); SmallVector<Metadata *, 8> DwarfOps; for (unsigned I = 4, E = Record.size(); I != E; ++I) DwarfOps.push_back(Record[I] ? 
MDValueList.getValueFwdRef(Record[I] - 1) : nullptr); MDValueList.assignValue(GET_OR_DISTINCT(GenericDINode, Record[0], (Context, Tag, Header, DwarfOps)), NextMDValueNo++); break; } case bitc::METADATA_SUBRANGE: { if (Record.size() != 3) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DISubrange, Record[0], (Context, Record[1], unrotateSign(Record[2]))), NextMDValueNo++); break; } case bitc::METADATA_ENUMERATOR: { if (Record.size() != 3) return error("Invalid record"); MDValueList.assignValue(GET_OR_DISTINCT(DIEnumerator, Record[0], (Context, unrotateSign(Record[1]), getMDString(Record[2]))), NextMDValueNo++); break; } case bitc::METADATA_BASIC_TYPE: { if (Record.size() != 6) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIBasicType, Record[0], (Context, Record[1], getMDString(Record[2]), Record[3], Record[4], Record[5])), NextMDValueNo++); break; } case bitc::METADATA_DERIVED_TYPE: { if (Record.size() != 12) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIDerivedType, Record[0], (Context, Record[1], getMDString(Record[2]), getMDOrNull(Record[3]), Record[4], getMDOrNull(Record[5]), getMDOrNull(Record[6]), Record[7], Record[8], Record[9], Record[10], getMDOrNull(Record[11]))), NextMDValueNo++); break; } case bitc::METADATA_COMPOSITE_TYPE: { if (Record.size() != 16) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DICompositeType, Record[0], (Context, Record[1], getMDString(Record[2]), getMDOrNull(Record[3]), Record[4], getMDOrNull(Record[5]), getMDOrNull(Record[6]), Record[7], Record[8], Record[9], Record[10], getMDOrNull(Record[11]), Record[12], getMDOrNull(Record[13]), getMDOrNull(Record[14]), getMDString(Record[15]))), NextMDValueNo++); break; } case bitc::METADATA_SUBROUTINE_TYPE: { if (Record.size() != 3) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DISubroutineType, Record[0], (Context, Record[1], getMDOrNull(Record[2]))), NextMDValueNo++); break; } case bitc::METADATA_MODULE: { if (Record.size() != 6) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIModule, Record[0], (Context, getMDOrNull(Record[1]), getMDString(Record[2]), getMDString(Record[3]), getMDString(Record[4]), getMDString(Record[5]))), NextMDValueNo++); break; } case bitc::METADATA_FILE: { if (Record.size() != 3) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIFile, Record[0], (Context, getMDString(Record[1]), getMDString(Record[2]))), NextMDValueNo++); break; } case bitc::METADATA_COMPILE_UNIT: { if (Record.size() < 14 || Record.size() > 15) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT( DICompileUnit, Record[0], (Context, Record[1], getMDOrNull(Record[2]), getMDString(Record[3]), Record[4], getMDString(Record[5]), Record[6], getMDString(Record[7]), Record[8], getMDOrNull(Record[9]), getMDOrNull(Record[10]), getMDOrNull(Record[11]), getMDOrNull(Record[12]), getMDOrNull(Record[13]), Record.size() == 14 ? 
0 : Record[14])), NextMDValueNo++); break; } case bitc::METADATA_SUBPROGRAM: { if (Record.size() != 19) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT( DISubprogram, Record[0], (Context, getMDOrNull(Record[1]), getMDString(Record[2]), getMDString(Record[3]), getMDOrNull(Record[4]), Record[5], getMDOrNull(Record[6]), Record[7], Record[8], Record[9], getMDOrNull(Record[10]), Record[11], Record[12], Record[13], Record[14], getMDOrNull(Record[15]), getMDOrNull(Record[16]), getMDOrNull(Record[17]), getMDOrNull(Record[18]))), NextMDValueNo++); break; } case bitc::METADATA_LEXICAL_BLOCK: { if (Record.size() != 5) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DILexicalBlock, Record[0], (Context, getMDOrNull(Record[1]), getMDOrNull(Record[2]), Record[3], Record[4])), NextMDValueNo++); break; } case bitc::METADATA_LEXICAL_BLOCK_FILE: { if (Record.size() != 4) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DILexicalBlockFile, Record[0], (Context, getMDOrNull(Record[1]), getMDOrNull(Record[2]), Record[3])), NextMDValueNo++); break; } case bitc::METADATA_NAMESPACE: { if (Record.size() != 5) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DINamespace, Record[0], (Context, getMDOrNull(Record[1]), getMDOrNull(Record[2]), getMDString(Record[3]), Record[4])), NextMDValueNo++); break; } case bitc::METADATA_TEMPLATE_TYPE: { if (Record.size() != 3) return error("Invalid record"); MDValueList.assignValue(GET_OR_DISTINCT(DITemplateTypeParameter, Record[0], (Context, getMDString(Record[1]), getMDOrNull(Record[2]))), NextMDValueNo++); break; } case bitc::METADATA_TEMPLATE_VALUE: { if (Record.size() != 5) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DITemplateValueParameter, Record[0], (Context, Record[1], getMDString(Record[2]), getMDOrNull(Record[3]), getMDOrNull(Record[4]))), NextMDValueNo++); break; } case bitc::METADATA_GLOBAL_VAR: { if (Record.size() != 11) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIGlobalVariable, Record[0], (Context, getMDOrNull(Record[1]), getMDString(Record[2]), getMDString(Record[3]), getMDOrNull(Record[4]), Record[5], getMDOrNull(Record[6]), Record[7], Record[8], getMDOrNull(Record[9]), getMDOrNull(Record[10]))), NextMDValueNo++); break; } case bitc::METADATA_LOCAL_VAR: { // 10th field is for the obseleted 'inlinedAt:' field. 
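      // Both lengths are accepted; the extra operand is ignored since the
      // inlinedAt location now lives on the instruction's DILocation.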
if (Record.size() != 9 && Record.size() != 10) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DILocalVariable, Record[0], (Context, Record[1], getMDOrNull(Record[2]), getMDString(Record[3]), getMDOrNull(Record[4]), Record[5], getMDOrNull(Record[6]), Record[7], Record[8])), NextMDValueNo++); break; } case bitc::METADATA_EXPRESSION: { if (Record.size() < 1) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIExpression, Record[0], (Context, makeArrayRef(Record).slice(1))), NextMDValueNo++); break; } case bitc::METADATA_OBJC_PROPERTY: { if (Record.size() != 8) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIObjCProperty, Record[0], (Context, getMDString(Record[1]), getMDOrNull(Record[2]), Record[3], getMDString(Record[4]), getMDString(Record[5]), Record[6], getMDOrNull(Record[7]))), NextMDValueNo++); break; } case bitc::METADATA_IMPORTED_ENTITY: { if (Record.size() != 6) return error("Invalid record"); MDValueList.assignValue( GET_OR_DISTINCT(DIImportedEntity, Record[0], (Context, Record[1], getMDOrNull(Record[2]), getMDOrNull(Record[3]), Record[4], getMDString(Record[5]))), NextMDValueNo++); break; } case bitc::METADATA_STRING: { #if 0 std::string String(Record.begin(), Record.end()); #else String.resize(Uint8Record.size()); memcpy(&String[0], Uint8Record.data(), Uint8Record.size()); #endif llvm::UpgradeMDStringConstant(String); Metadata *MD = MDString::get(Context, String); MDValueList.assignValue(MD, NextMDValueNo++); break; } case bitc::METADATA_KIND: { if (Record.size() < 2) return error("Invalid record"); unsigned Kind = Record[0]; SmallString<8> Name(Record.begin()+1, Record.end()); unsigned NewKind = TheModule->getMDKindID(Name.str()); if (!MDKindMap.insert(std::make_pair(Kind, NewKind)).second) return error("Conflicting METADATA_KIND records"); break; } } } #undef GET_OR_DISTINCT } /// Decode a signed value stored with the sign bit in the LSB for dense VBR /// encoding. uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) { if ((V & 1) == 0) return V >> 1; if (V != 1) return -(V >> 1); // There is no such thing as -0 with integers. "-0" really means MININT. return 1ULL << 63; } /// Resolve all of the initializers for global values and aliases that we can. std::error_code BitcodeReader::resolveGlobalAndAliasInits() { std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInitWorklist; std::vector<std::pair<GlobalAlias*, unsigned> > AliasInitWorklist; std::vector<std::pair<Function*, unsigned> > FunctionPrefixWorklist; std::vector<std::pair<Function*, unsigned> > FunctionPrologueWorklist; std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFnWorklist; GlobalInitWorklist.swap(GlobalInits); AliasInitWorklist.swap(AliasInits); FunctionPrefixWorklist.swap(FunctionPrefixes); FunctionPrologueWorklist.swap(FunctionPrologues); FunctionPersonalityFnWorklist.swap(FunctionPersonalityFns); while (!GlobalInitWorklist.empty()) { unsigned ValID = GlobalInitWorklist.back().second; if (ValID >= ValueList.size()) { // Not ready to resolve this yet, it requires something later in the file. 
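      // Keep the entry queued: resolveGlobalAndAliasInits() runs again after
      // each constants block, and globalCleanup() reports an error for
      // anything still unresolved at the end of the module.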
      GlobalInits.push_back(GlobalInitWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        GlobalInitWorklist.back().first->setInitializer(C);
      else
        return error("Expected a constant");
    }
    GlobalInitWorklist.pop_back();
  }

  while (!AliasInitWorklist.empty()) {
    unsigned ValID = AliasInitWorklist.back().second;
    if (ValID >= ValueList.size()) {
      AliasInits.push_back(AliasInitWorklist.back());
    } else {
      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
      if (!C)
        return error("Expected a constant");
      GlobalAlias *Alias = AliasInitWorklist.back().first;
      if (C->getType() != Alias->getType())
        return error("Alias and aliasee types don't match");
      Alias->setAliasee(C);
    }
    AliasInitWorklist.pop_back();
  }

  while (!FunctionPrefixWorklist.empty()) {
    unsigned ValID = FunctionPrefixWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrefixWorklist.back().first->setPrefixData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrefixWorklist.pop_back();
  }

  while (!FunctionPrologueWorklist.empty()) {
    unsigned ValID = FunctionPrologueWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrologues.push_back(FunctionPrologueWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrologueWorklist.back().first->setPrologueData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrologueWorklist.pop_back();
  }

  while (!FunctionPersonalityFnWorklist.empty()) {
    unsigned ValID = FunctionPersonalityFnWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
      else
        return error("Expected a constant");
    }
    FunctionPersonalityFnWorklist.pop_back();
  }

  return std::error_code();
}

static APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
  SmallVector<uint64_t, 8> Words(Vals.size());
  std::transform(Vals.begin(), Vals.end(), Words.begin(),
                 BitcodeReader::decodeSignRotatedValue);
  return APInt(TypeBits, Words);
}

std::error_code BitcodeReader::parseConstants() {
  if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
    return error("Invalid record");

  SmallVector<uint64_t, 64> Record;

  // Read all the records for this value table.
  Type *CurTy = Type::getInt32Ty(Context);
  unsigned NextCstNo = ValueList.size();
  while (1) {
    // HLSL Change Starts - count skipped blocks
    unsigned skipCount = 0;
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount);
    if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock");
    // HLSL Change End

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      if (NextCstNo != ValueList.size())
        return error("Invalid constant reference");

      // Once all the constants have been read, go through and resolve forward
      // references.
      ValueList.resolveConstantForwardRefs();
      return std::error_code();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
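    // CST_CODE_INTEGER and CST_CODE_WIDE_INTEGER operands use the
    // sign-rotated encoding handled by decodeSignRotatedValue(): the sign is
    // stored in bit 0, so e.g. +5 is encoded as 10 and -5 as 11.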
Record.clear(); Value *V = nullptr; unsigned BitCode = Stream.readRecord(Entry.ID, Record); switch (BitCode) { default: // Default behavior: unknown constant case bitc::CST_CODE_UNDEF: // UNDEF V = UndefValue::get(CurTy); break; case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid] if (Record.empty()) return error("Invalid record"); if (Record[0] >= TypeList.size() || !TypeList[Record[0]]) return error("Invalid record"); CurTy = TypeList[Record[0]]; continue; // Skip the ValueList manipulation. case bitc::CST_CODE_NULL: // NULL V = Constant::getNullValue(CurTy); break; case bitc::CST_CODE_INTEGER: // INTEGER: [intval] if (!CurTy->isIntegerTy() || Record.empty()) return error("Invalid record"); V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0])); break; case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval] if (!CurTy->isIntegerTy() || Record.empty()) return error("Invalid record"); APInt VInt = readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth()); V = ConstantInt::get(Context, VInt); break; } case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval] if (Record.empty()) return error("Invalid record"); if (CurTy->isHalfTy()) V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf, APInt(16, (uint16_t)Record[0]))); else if (CurTy->isFloatTy()) V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle, APInt(32, (uint32_t)Record[0]))); else if (CurTy->isDoubleTy()) V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble, APInt(64, Record[0]))); else if (CurTy->isX86_FP80Ty()) { // Bits are not stored the same way as a normal i80 APInt, compensate. uint64_t Rearrange[2]; Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16); Rearrange[1] = Record[0] >> 48; V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended, APInt(80, Rearrange))); } else if (CurTy->isFP128Ty()) V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad, APInt(128, Record))); else if (CurTy->isPPC_FP128Ty()) V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble, APInt(128, Record))); else V = UndefValue::get(CurTy); break; } case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number] if (Record.empty()) return error("Invalid record"); unsigned Size = Record.size(); SmallVector<Constant*, 16> Elts; if (StructType *STy = dyn_cast<StructType>(CurTy)) { for (unsigned i = 0; i != Size; ++i) Elts.push_back(ValueList.getConstantFwdRef(Record[i], STy->getElementType(i))); V = ConstantStruct::get(STy, Elts); } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) { Type *EltTy = ATy->getElementType(); for (unsigned i = 0; i != Size; ++i) Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); V = ConstantArray::get(ATy, Elts); } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) { Type *EltTy = VTy->getElementType(); for (unsigned i = 0; i != Size; ++i) Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); V = ConstantVector::get(Elts); } else { V = UndefValue::get(CurTy); } break; } case bitc::CST_CODE_STRING: // STRING: [values] case bitc::CST_CODE_CSTRING: { // CSTRING: [values] if (Record.empty()) return error("Invalid record"); SmallString<16> Elts(Record.begin(), Record.end()); V = ConstantDataArray::getString(Context, Elts, BitCode == bitc::CST_CODE_CSTRING); break; } case bitc::CST_CODE_DATA: {// DATA: [n x value] if (Record.empty()) return error("Invalid record"); Type *EltTy = cast<SequentialType>(CurTy)->getElementType(); unsigned Size = Record.size(); if (EltTy->isIntegerTy(8)) { SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end()); if 
(isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else if (EltTy->isIntegerTy(16)) { SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end()); if (isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else if (EltTy->isIntegerTy(32)) { SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end()); if (isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else if (EltTy->isIntegerTy(64)) { SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end()); if (isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else if (EltTy->isFloatTy()) { SmallVector<float, 16> Elts(Size); std::transform(Record.begin(), Record.end(), Elts.begin(), BitsToFloat); if (isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else if (EltTy->isDoubleTy()) { SmallVector<double, 16> Elts(Size); std::transform(Record.begin(), Record.end(), Elts.begin(), BitsToDouble); if (isa<VectorType>(CurTy)) V = ConstantDataVector::get(Context, Elts); else V = ConstantDataArray::get(Context, Elts); } else { return error("Invalid type for value"); } break; } case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval] if (Record.size() < 3) return error("Invalid record"); int Opc = getDecodedBinaryOpcode(Record[0], CurTy); if (Opc < 0) { V = UndefValue::get(CurTy); // Unknown binop. } else { Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy); Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy); unsigned Flags = 0; if (Record.size() >= 4) { if (Opc == Instruction::Add || Opc == Instruction::Sub || Opc == Instruction::Mul || Opc == Instruction::Shl) { if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP)) Flags |= OverflowingBinaryOperator::NoSignedWrap; if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) Flags |= OverflowingBinaryOperator::NoUnsignedWrap; } else if (Opc == Instruction::SDiv || Opc == Instruction::UDiv || Opc == Instruction::LShr || Opc == Instruction::AShr) { if (Record[3] & (1 << bitc::PEO_EXACT)) Flags |= SDivOperator::IsExact; } } V = ConstantExpr::get(Opc, LHS, RHS, Flags); } break; } case bitc::CST_CODE_CE_CAST: { // CE_CAST: [opcode, opty, opval] if (Record.size() < 3) return error("Invalid record"); int Opc = getDecodedCastOpcode(Record[0]); if (Opc < 0) { V = UndefValue::get(CurTy); // Unknown cast. 
} else { Type *OpTy = getTypeByID(Record[1]); if (!OpTy) return error("Invalid record"); Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy); V = UpgradeBitCastExpr(Opc, Op, CurTy); if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy); } break; } case bitc::CST_CODE_CE_INBOUNDS_GEP: case bitc::CST_CODE_CE_GEP: { // CE_GEP: [n x operands] unsigned OpNum = 0; Type *PointeeType = nullptr; if (Record.size() % 2) PointeeType = getTypeByID(Record[OpNum++]); SmallVector<Constant*, 16> Elts; while (OpNum != Record.size()) { Type *ElTy = getTypeByID(Record[OpNum++]); if (!ElTy) return error("Invalid record"); Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy)); } if (PointeeType && PointeeType != cast<SequentialType>(Elts[0]->getType()->getScalarType()) ->getElementType()) return error("Explicit gep operator type does not match pointee type " "of pointer operand"); ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end()); V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices, BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP); break; } case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#] if (Record.size() < 3) return error("Invalid record"); Type *SelectorTy = Type::getInt1Ty(Context); // If CurTy is a vector of length n, then Record[0] must be a <n x i1> // vector. Otherwise, it must be a single bit. if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) SelectorTy = VectorType::get(Type::getInt1Ty(Context), VTy->getNumElements()); V = ConstantExpr::getSelect(ValueList.getConstantFwdRef(Record[0], SelectorTy), ValueList.getConstantFwdRef(Record[1],CurTy), ValueList.getConstantFwdRef(Record[2],CurTy)); break; } case bitc::CST_CODE_CE_EXTRACTELT : { // CE_EXTRACTELT: [opty, opval, opty, opval] if (Record.size() < 3) return error("Invalid record"); VectorType *OpTy = dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); if (!OpTy) return error("Invalid record"); Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); Constant *Op1 = nullptr; if (Record.size() == 4) { Type *IdxTy = getTypeByID(Record[2]); if (!IdxTy) return error("Invalid record"); Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy); } else // TODO: Remove with llvm 4.0 Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); if (!Op1) return error("Invalid record"); V = ConstantExpr::getExtractElement(Op0, Op1); break; } case bitc::CST_CODE_CE_INSERTELT : { // CE_INSERTELT: [opval, opval, opty, opval] VectorType *OpTy = dyn_cast<VectorType>(CurTy); if (Record.size() < 3 || !OpTy) return error("Invalid record"); Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy->getElementType()); Constant *Op2 = nullptr; if (Record.size() == 4) { Type *IdxTy = getTypeByID(Record[2]); if (!IdxTy) return error("Invalid record"); Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy); } else // TODO: Remove with llvm 4.0 Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); if (!Op2) return error("Invalid record"); V = ConstantExpr::getInsertElement(Op0, Op1, Op2); break; } case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval] VectorType *OpTy = dyn_cast<VectorType>(CurTy); if (Record.size() < 3 || !OpTy) return error("Invalid record"); Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy); Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), OpTy->getNumElements()); Constant *Op2 = 
ValueList.getConstantFwdRef(Record[2], ShufTy); V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); break; } case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval] VectorType *RTy = dyn_cast<VectorType>(CurTy); VectorType *OpTy = dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); if (Record.size() < 4 || !RTy || !OpTy) return error("Invalid record"); Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), RTy->getNumElements()); Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy); V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); break; } case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred] if (Record.size() < 4) return error("Invalid record"); Type *OpTy = getTypeByID(Record[0]); if (!OpTy) return error("Invalid record"); Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); if (OpTy->isFPOrFPVectorTy()) V = ConstantExpr::getFCmp(Record[3], Op0, Op1); else V = ConstantExpr::getICmp(Record[3], Op0, Op1); break; } // This maintains backward compatibility, pre-asm dialect keywords. // FIXME: Remove with the 4.0 release. case bitc::CST_CODE_INLINEASM_OLD: { if (Record.size() < 2) return error("Invalid record"); std::string AsmStr, ConstrStr; bool HasSideEffects = Record[0] & 1; bool IsAlignStack = Record[0] >> 1; unsigned AsmStrSize = Record[1]; if (2+AsmStrSize >= Record.size()) return error("Invalid record"); unsigned ConstStrSize = Record[2+AsmStrSize]; if (3+AsmStrSize+ConstStrSize > Record.size()) return error("Invalid record"); for (unsigned i = 0; i != AsmStrSize; ++i) AsmStr += (char)Record[2+i]; for (unsigned i = 0; i != ConstStrSize; ++i) ConstrStr += (char)Record[3+AsmStrSize+i]; PointerType *PTy = cast<PointerType>(CurTy); V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), AsmStr, ConstrStr, HasSideEffects, IsAlignStack); break; } // This version adds support for the asm dialect keywords (e.g., // inteldialect). case bitc::CST_CODE_INLINEASM: { if (Record.size() < 2) return error("Invalid record"); std::string AsmStr, ConstrStr; bool HasSideEffects = Record[0] & 1; bool IsAlignStack = (Record[0] >> 1) & 1; unsigned AsmDialect = Record[0] >> 2; unsigned AsmStrSize = Record[1]; if (2+AsmStrSize >= Record.size()) return error("Invalid record"); unsigned ConstStrSize = Record[2+AsmStrSize]; if (3+AsmStrSize+ConstStrSize > Record.size()) return error("Invalid record"); for (unsigned i = 0; i != AsmStrSize; ++i) AsmStr += (char)Record[2+i]; for (unsigned i = 0; i != ConstStrSize; ++i) ConstrStr += (char)Record[3+AsmStrSize+i]; PointerType *PTy = cast<PointerType>(CurTy); V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), AsmStr, ConstrStr, HasSideEffects, IsAlignStack, InlineAsm::AsmDialect(AsmDialect)); break; } case bitc::CST_CODE_BLOCKADDRESS:{ if (Record.size() < 3) return error("Invalid record"); Type *FnTy = getTypeByID(Record[0]); if (!FnTy) return error("Invalid record"); Function *Fn = dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy)); if (!Fn) return error("Invalid record"); // Don't let Fn get dematerialized. BlockAddressesTaken.insert(Fn); // If the function is already parsed we can insert the block address right // away. BasicBlock *BB; unsigned BBID = Record[2]; if (!BBID) // Invalid reference to entry block. 
return error("Invalid ID"); if (!Fn->empty()) { Function::iterator BBI = Fn->begin(), BBE = Fn->end(); for (size_t I = 0, E = BBID; I != E; ++I) { if (BBI == BBE) return error("Invalid ID"); ++BBI; } BB = BBI; } else { // Otherwise insert a placeholder and remember it so it can be inserted // when the function is parsed. auto &FwdBBs = BasicBlockFwdRefs[Fn]; if (FwdBBs.empty()) BasicBlockFwdRefQueue.push_back(Fn); if (FwdBBs.size() < BBID + 1) FwdBBs.resize(BBID + 1); if (!FwdBBs[BBID]) FwdBBs[BBID] = BasicBlock::Create(Context); BB = FwdBBs[BBID]; } V = BlockAddress::get(Fn, BB); break; } } ValueList.assignValue(V, NextCstNo); ++NextCstNo; } } std::error_code BitcodeReader::parseUseLists() { if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID)) return error("Invalid record"); // Read all the records. SmallVector<uint64_t, 64> Record; while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a use list record. Record.clear(); bool IsBB = false; switch (Stream.readRecord(Entry.ID, Record)) { default: // Default behavior: unknown type. break; case bitc::USELIST_CODE_BB: IsBB = true; LLVM_FALLTHROUGH; // HLSL Change case bitc::USELIST_CODE_DEFAULT: { unsigned RecordLength = Record.size(); if (RecordLength < 3) // Records should have at least an ID and two indexes. return error("Invalid record"); unsigned ID = Record.back(); Record.pop_back(); Value *V; if (IsBB) { assert(ID < FunctionBBs.size() && "Basic block not found"); V = FunctionBBs[ID]; } else V = ValueList[ID]; unsigned NumUses = 0; SmallDenseMap<const Use *, unsigned, 16> Order; for (const Use &U : V->uses()) { if (++NumUses > Record.size()) break; Order[&U] = Record[NumUses - 1]; } if (Order.size() != Record.size() || NumUses > Record.size()) // Mismatches can happen if the functions are being materialized lazily // (out-of-order), or a value has been upgraded. break; V->sortUseList([&](const Use &L, const Use &R) { return Order.lookup(&L) < Order.lookup(&R); }); break; } } } } /// When we see the block for metadata, remember where it is and then skip it. /// This lets us lazily deserialize the metadata. std::error_code BitcodeReader::rememberAndSkipMetadata() { // Save the current stream state. uint64_t CurBit = Stream.GetCurrentBitNo(); DeferredMetadataInfo.push_back(CurBit); // Skip over the block for now. if (Stream.SkipBlock()) return error("Invalid record"); return std::error_code(); } std::error_code BitcodeReader::materializeMetadata() { for (uint64_t BitPos : DeferredMetadataInfo) { // Move the bit stream to the saved position. Stream.JumpToBit(BitPos); if (std::error_code EC = parseMetadata()) return EC; } DeferredMetadataInfo.clear(); return std::error_code(); } void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; } /// When we see the block for a function body, remember where it is and then /// skip it. This lets us lazily deserialize the functions. std::error_code BitcodeReader::rememberAndSkipFunctionBody() { // Get the function we are talking about. 
if (FunctionsWithBodies.empty()) return error("Insufficient function protos"); Function *Fn = FunctionsWithBodies.back(); FunctionsWithBodies.pop_back(); // Save the current stream state. uint64_t CurBit = Stream.GetCurrentBitNo(); DeferredFunctionInfo[Fn] = CurBit; // Skip over the function block for now. if (Stream.SkipBlock()) return error("Invalid record"); return std::error_code(); } std::error_code BitcodeReader::globalCleanup() { // Patch the initializers for globals and aliases up. resolveGlobalAndAliasInits(); if (!GlobalInits.empty() || !AliasInits.empty()) return error("Malformed global initializer set"); // Look for intrinsic functions which need to be upgraded at some point for (Function &F : *TheModule) { Function *NewFn; if (UpgradeIntrinsicFunction(&F, NewFn)) UpgradedIntrinsics[&F] = NewFn; } // Look for global variables which need to be renamed. for (GlobalVariable &GV : TheModule->globals()) UpgradeGlobalVariable(&GV); // Force deallocation of memory for these vectors to favor the client that // want lazy deserialization. std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits); std::vector<std::pair<GlobalAlias*, unsigned> >().swap(AliasInits); return std::error_code(); } std::error_code BitcodeReader::parseModule(bool Resume, bool ShouldLazyLoadMetadata) { if (Resume) Stream.JumpToBit(NextUnreadBit); else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) return error("Invalid record"); SmallVector<uint64_t, 64> Record; std::vector<std::string> SectionTable; std::vector<std::string> GCTable; // Read all the records for this module. while (1) { BitstreamEntry Entry = Stream.advance(); switch (Entry.Kind) { case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return globalCleanup(); case BitstreamEntry::SubBlock: switch (Entry.ID) { default: // Skip unknown content. if (Stream.SkipBlock()) return error("Invalid record"); ReportWarning(DiagnosticHandler, "Unrecognized block found"); // HLSL Change - check for skipped blocks break; case bitc::BLOCKINFO_BLOCK_ID: // HLSL Changes Start -- check for skipped blocks unsigned count; count = 0; if (Stream.ReadBlockInfoBlock(&count)) return error("Malformed block"); if (count > 0) ReportWarning(DiagnosticHandler, "Unrecognized block found"); // HLSL Changes End break; case bitc::PARAMATTR_BLOCK_ID: if (std::error_code EC = parseAttributeBlock()) return EC; break; case bitc::PARAMATTR_GROUP_BLOCK_ID: if (std::error_code EC = parseAttributeGroupBlock()) return EC; break; case bitc::TYPE_BLOCK_ID_NEW: if (std::error_code EC = parseTypeTable()) return EC; break; case bitc::VALUE_SYMTAB_BLOCK_ID: if (std::error_code EC = parseValueSymbolTable()) return EC; SeenValueSymbolTable = true; break; case bitc::CONSTANTS_BLOCK_ID: if (std::error_code EC = parseConstants()) return EC; if (std::error_code EC = resolveGlobalAndAliasInits()) return EC; break; case bitc::METADATA_BLOCK_ID: if (ShouldLazyLoadMetadata && !IsMetadataMaterialized) { if (std::error_code EC = rememberAndSkipMetadata()) return EC; break; } assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata"); if (std::error_code EC = parseMetadata()) return EC; break; case bitc::FUNCTION_BLOCK_ID: // If this is the first function body we've seen, reverse the // FunctionsWithBodies list. 
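// Prototypes were pushed in declaration order; reversing once here lets
// rememberAndSkipFunctionBody() pop them from the back as each body block is
// encountered in the stream.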
if (!SeenFirstFunctionBody) { std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end()); if (std::error_code EC = globalCleanup()) return EC; SeenFirstFunctionBody = true; } if (std::error_code EC = rememberAndSkipFunctionBody()) return EC; // Suspend parsing when we reach the function bodies. Subsequent // materialization calls will resume it when necessary. If the bitcode // file is old, the symbol table will be at the end instead and will not // have been seen yet. In this case, just finish the parse now. if (SeenValueSymbolTable) { NextUnreadBit = Stream.GetCurrentBitNo(); return std::error_code(); } break; case bitc::USELIST_BLOCK_ID: if (std::error_code EC = parseUseLists()) return EC; break; } continue; case BitstreamEntry::Record: // The interesting case. break; } // Read a record. switch (Stream.readRecord(Entry.ID, Record)) { default: break; // Default behavior, ignore unknown content. case bitc::MODULE_CODE_VERSION: { // VERSION: [version#] if (Record.size() < 1) return error("Invalid record"); // Only version #0 and #1 are supported so far. unsigned module_version = Record[0]; switch (module_version) { default: return error("Invalid value"); case 0: UseRelativeIDs = false; break; case 1: UseRelativeIDs = true; break; } break; } case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); TheModule->setTargetTriple(S); break; } case bitc::MODULE_CODE_DATALAYOUT: { // DATALAYOUT: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); TheModule->setDataLayout(S); break; } case bitc::MODULE_CODE_ASM: { // ASM: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); TheModule->setModuleInlineAsm(S); break; } case bitc::MODULE_CODE_DEPLIB: { // DEPLIB: [strchr x N] // FIXME: Remove in 4.0. std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); // Ignore value. 
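// Dependent libraries are no longer represented in the IR, so the string is
// read only to keep older bitcode parsable.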
break; } case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); SectionTable.push_back(S); break; } case bitc::MODULE_CODE_GCNAME: { // SECTIONNAME: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); GCTable.push_back(S); break; } case bitc::MODULE_CODE_COMDAT: { // COMDAT: [selection_kind, name] if (Record.size() < 2) return error("Invalid record"); Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]); unsigned ComdatNameSize = Record[1]; std::string ComdatName; ComdatName.reserve(ComdatNameSize); for (unsigned i = 0; i != ComdatNameSize; ++i) ComdatName += (char)Record[2 + i]; Comdat *C = TheModule->getOrInsertComdat(ComdatName); C->setSelectionKind(SK); ComdatList.push_back(C); break; } // GLOBALVAR: [pointer type, isconst, initid, // linkage, alignment, section, visibility, threadlocal, // unnamed_addr, externally_initialized, dllstorageclass, // comdat] case bitc::MODULE_CODE_GLOBALVAR: { if (Record.size() < 6) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (!Ty) return error("Invalid record"); bool isConstant = Record[1] & 1; bool explicitType = Record[1] & 2; unsigned AddressSpace; if (explicitType) { AddressSpace = Record[1] >> 2; } else { if (!Ty->isPointerTy()) return error("Invalid type for value"); AddressSpace = cast<PointerType>(Ty)->getAddressSpace(); Ty = cast<PointerType>(Ty)->getElementType(); } uint64_t RawLinkage = Record[3]; GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); unsigned Alignment; if (std::error_code EC = parseAlignmentValue(Record[4], Alignment)) return EC; std::string Section; if (Record[5]) { if (Record[5]-1 >= SectionTable.size()) return error("Invalid ID"); Section = SectionTable[Record[5]-1]; } GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility; // Local linkage must have default visibility. if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage)) // FIXME: Change to an error if non-default in 4.0. Visibility = getDecodedVisibility(Record[6]); GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal; if (Record.size() > 7) TLM = getDecodedThreadLocalMode(Record[7]); bool UnnamedAddr = false; if (Record.size() > 8) UnnamedAddr = Record[8]; bool ExternallyInitialized = false; if (Record.size() > 9) ExternallyInitialized = Record[9]; GlobalVariable *NewGV = new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, "", nullptr, TLM, AddressSpace, ExternallyInitialized); NewGV->setAlignment(Alignment); if (!Section.empty()) NewGV->setSection(Section); NewGV->setVisibility(Visibility); NewGV->setUnnamedAddr(UnnamedAddr); if (Record.size() > 10) NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10])); else upgradeDLLImportExportLinkage(NewGV, RawLinkage); ValueList.push_back(NewGV); // Remember which value to use for the global initializer. 
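// A nonzero initid is biased by one (0 means "no initializer"); the reference
// is recorded here and resolved later by resolveGlobalAndAliasInits(), since
// the initializer constant may not have been parsed yet.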
if (unsigned InitID = Record[2]) GlobalInits.push_back(std::make_pair(NewGV, InitID-1)); if (Record.size() > 11) { if (unsigned ComdatID = Record[11]) { if (ComdatID > ComdatList.size()) return error("Invalid global variable comdat ID"); NewGV->setComdat(ComdatList[ComdatID - 1]); } } else if (hasImplicitComdat(RawLinkage)) { NewGV->setComdat(reinterpret_cast<Comdat *>(1)); } break; } // FUNCTION: [type, callingconv, isproto, linkage, paramattr, // alignment, section, visibility, gc, unnamed_addr, // prologuedata, dllstorageclass, comdat, prefixdata] case bitc::MODULE_CODE_FUNCTION: { if (Record.size() < 8) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (!Ty) return error("Invalid record"); if (auto *PTy = dyn_cast<PointerType>(Ty)) Ty = PTy->getElementType(); auto *FTy = dyn_cast<FunctionType>(Ty); if (!FTy) return error("Invalid type for value"); Function *Func = Function::Create(FTy, GlobalValue::ExternalLinkage, "", TheModule); Func->setCallingConv(static_cast<CallingConv::ID>(Record[1])); bool isProto = Record[2]; uint64_t RawLinkage = Record[3]; Func->setLinkage(getDecodedLinkage(RawLinkage)); Func->setAttributes(getAttributes(Record[4])); unsigned Alignment; if (std::error_code EC = parseAlignmentValue(Record[5], Alignment)) return EC; Func->setAlignment(Alignment); if (Record[6]) { if (Record[6]-1 >= SectionTable.size()) return error("Invalid ID"); Func->setSection(SectionTable[Record[6]-1]); } // Local linkage must have default visibility. if (!Func->hasLocalLinkage()) // FIXME: Change to an error if non-default in 4.0. Func->setVisibility(getDecodedVisibility(Record[7])); if (Record.size() > 8 && Record[8]) { if (Record[8]-1 >= GCTable.size()) return error("Invalid ID"); Func->setGC(GCTable[Record[8]-1].c_str()); } bool UnnamedAddr = false; if (Record.size() > 9) UnnamedAddr = Record[9]; Func->setUnnamedAddr(UnnamedAddr); if (Record.size() > 10 && Record[10] != 0) FunctionPrologues.push_back(std::make_pair(Func, Record[10]-1)); if (Record.size() > 11) Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11])); else upgradeDLLImportExportLinkage(Func, RawLinkage); if (Record.size() > 12) { if (unsigned ComdatID = Record[12]) { if (ComdatID > ComdatList.size()) return error("Invalid function comdat ID"); Func->setComdat(ComdatList[ComdatID - 1]); } } else if (hasImplicitComdat(RawLinkage)) { Func->setComdat(reinterpret_cast<Comdat *>(1)); } if (Record.size() > 13 && Record[13] != 0) FunctionPrefixes.push_back(std::make_pair(Func, Record[13]-1)); if (Record.size() > 14 && Record[14] != 0) FunctionPersonalityFns.push_back(std::make_pair(Func, Record[14] - 1)); ValueList.push_back(Func); // If this is a function with a body, remember the prototype we are // creating now, so that we can match up the body with them later. if (!isProto) { Func->setIsMaterializable(true); FunctionsWithBodies.push_back(Func); DeferredFunctionInfo[Func] = 0; } break; } // ALIAS: [alias type, aliasee val#, linkage] // ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass] case bitc::MODULE_CODE_ALIAS: { if (Record.size() < 3) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (!Ty) return error("Invalid record"); auto *PTy = dyn_cast<PointerType>(Ty); if (!PTy) return error("Invalid type for value"); auto *NewGA = GlobalAlias::create(PTy, getDecodedLinkage(Record[2]), "", TheModule); // Old bitcode files didn't have visibility field. // Local linkage must have default visibility. 
if (Record.size() > 3 && !NewGA->hasLocalLinkage()) // FIXME: Change to an error if non-default in 4.0. NewGA->setVisibility(getDecodedVisibility(Record[3])); if (Record.size() > 4) NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[4])); else upgradeDLLImportExportLinkage(NewGA, Record[2]); if (Record.size() > 5) NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[5])); if (Record.size() > 6) NewGA->setUnnamedAddr(Record[6]); ValueList.push_back(NewGA); AliasInits.push_back(std::make_pair(NewGA, Record[1])); break; } /// MODULE_CODE_PURGEVALS: [numvals] case bitc::MODULE_CODE_PURGEVALS: // Trim down the value list to the specified size. if (Record.size() < 1 || Record[0] > ValueList.size()) return error("Invalid record"); ValueList.shrinkTo(Record[0]); break; } Record.clear(); } } std::error_code BitcodeReader::parseBitcodeInto(std::unique_ptr<DataStreamer> Streamer, Module *M, bool ShouldLazyLoadMetadata) { TheModule = M; if (std::error_code EC = initStream(std::move(Streamer))) return EC; // Sniff for the signature. if (Stream.Read(8) != 'B' || Stream.Read(8) != 'C' || Stream.Read(4) != 0x0 || Stream.Read(4) != 0xC || Stream.Read(4) != 0xE || Stream.Read(4) != 0xD) return error("Invalid bitcode signature"); // We expect a number of well-defined blocks, though we don't necessarily // need to understand them all. while (1) { if (Stream.AtEndOfStream()) { return error("Malformed IR file"); } BitstreamEntry Entry = Stream.advance(BitstreamCursor::AF_DontAutoprocessAbbrevs); if (Entry.Kind != BitstreamEntry::SubBlock) return error("Malformed block"); // HLSL Change - process the complete stream. if (Entry.ID == bitc::MODULE_BLOCK_ID) { return parseModule(false, ShouldLazyLoadMetadata); } else { ReportWarning(DiagnosticHandler, "Unrecognized block found"); if (Stream.SkipBlock()) return error("Invalid record"); } // HLSL Change Ends - process the complete stream. } } ErrorOr<std::string> BitcodeReader::parseModuleTriple() { if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) return error("Invalid record"); SmallVector<uint64_t, 64> Record; std::string Triple; // Read all the records for this module. while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return Triple; case BitstreamEntry::Record: // The interesting case. break; } // Read a record. switch (Stream.readRecord(Entry.ID, Record)) { default: break; // Default behavior, ignore unknown content. case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N] std::string S; if (convertToString(Record, 0, S)) return error("Invalid record"); Triple = S; break; } } Record.clear(); } llvm_unreachable("Exit infinite loop"); } ErrorOr<std::string> BitcodeReader::parseTriple() { if (std::error_code EC = initStream(nullptr)) return EC; // Sniff for the signature. if (Stream.Read(8) != 'B' || Stream.Read(8) != 'C' || Stream.Read(4) != 0x0 || Stream.Read(4) != 0xC || Stream.Read(4) != 0xE || Stream.Read(4) != 0xD) return error("Invalid bitcode signature"); // We expect a number of well-defined blocks, though we don't necessarily // need to understand them all. 
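// Scan the top-level entries: the triple lives inside the MODULE block, so any
// other sub-block is skipped (with a warning) and loose records are ignored.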
while (1) { BitstreamEntry Entry = Stream.advance(); switch (Entry.Kind) { case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::SubBlock: if (Entry.ID == bitc::MODULE_BLOCK_ID) return parseModuleTriple(); // Ignore other sub-blocks. if (Stream.SkipBlock()) return error("Malformed block"); ReportWarning(DiagnosticHandler, "Unrecognized block found"); continue; case BitstreamEntry::Record: Stream.skipRecord(Entry.ID); continue; } } } /// Parse metadata attachments. std::error_code BitcodeReader::parseMetadataAttachment(Function &F) { if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID)) return error("Invalid record"); SmallVector<uint64_t, 64> Record; while (1) { // HLSL Change Starts - count skipped blocks unsigned skipCount = 0; BitstreamEntry Entry = Stream.advanceSkippingSubblocks(0, &skipCount); if (skipCount) ReportWarning(DiagnosticHandler, "Unrecognized subblock"); // HLSL Change End switch (Entry.Kind) { case BitstreamEntry::SubBlock: // Handled for us already. case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: return std::error_code(); case BitstreamEntry::Record: // The interesting case. break; } // Read a metadata attachment record. Record.clear(); switch (Stream.readRecord(Entry.ID, Record)) { default: // Default behavior: ignore. break; case bitc::METADATA_ATTACHMENT: { unsigned RecordLength = Record.size(); if (Record.empty()) return error("Invalid record"); if (RecordLength % 2 == 0) { // A function attachment. for (unsigned I = 0; I != RecordLength; I += 2) { auto K = MDKindMap.find(Record[I]); if (K == MDKindMap.end()) return error("Invalid ID"); Metadata *MD = MDValueList.getValueFwdRef(Record[I + 1]); F.setMetadata(K->second, cast<MDNode>(MD)); } continue; } // An instruction attachment. Instruction *Inst = InstructionList[Record[0]]; for (unsigned i = 1; i != RecordLength; i = i+2) { unsigned Kind = Record[i]; DenseMap<unsigned, unsigned>::iterator I = MDKindMap.find(Kind); if (I == MDKindMap.end()) return error("Invalid ID"); Metadata *Node = MDValueList.getValueFwdRef(Record[i + 1]); if (isa<LocalAsMetadata>(Node)) // Drop the attachment. This used to be legal, but there's no // upgrade path. break; Inst->setMetadata(I->second, cast<MDNode>(Node)); if (I->second == LLVMContext::MD_tbaa) InstsWithTBAATag.push_back(Inst); } break; } } } } static std::error_code typeCheckLoadStoreInst(DiagnosticHandlerFunction DH, Type *ValType, Type *PtrType) { if (!isa<PointerType>(PtrType)) return error(DH, "Load/Store operand is not a pointer type"); Type *ElemType = cast<PointerType>(PtrType)->getElementType(); if (ValType && ValType != ElemType) return error(DH, "Explicit load/store type does not match pointee type of " "pointer operand"); if (!PointerType::isLoadableOrStorableType(ElemType)) return error(DH, "Cannot load/store from pointer"); return std::error_code(); } /// Lazily parse the specified function body block. std::error_code BitcodeReader::parseFunctionBody(Function *F) { if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID)) return error("Invalid record"); InstructionList.clear(); unsigned ModuleValueListSize = ValueList.size(); unsigned ModuleMDValueListSize = MDValueList.size(); // Add all the function arguments to the value table. 
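// Formal arguments occupy the first function-local slots of the value table;
// instruction results are appended after them as they are parsed.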
for(Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) ValueList.push_back(I); unsigned NextValueNo = ValueList.size(); BasicBlock *CurBB = nullptr; unsigned CurBBNo = 0; DebugLoc LastLoc; auto getLastInstruction = [&]() -> Instruction * { if (CurBB && !CurBB->empty()) return &CurBB->back(); else if (CurBBNo && FunctionBBs[CurBBNo - 1] && !FunctionBBs[CurBBNo - 1]->empty()) return &FunctionBBs[CurBBNo - 1]->back(); return nullptr; }; // Read all the records. SmallVector<uint64_t, 64> Record; while (1) { BitstreamEntry Entry = Stream.advance(); switch (Entry.Kind) { case BitstreamEntry::Error: return error("Malformed block"); case BitstreamEntry::EndBlock: goto OutOfRecordLoop; case BitstreamEntry::SubBlock: switch (Entry.ID) { default: // Skip unknown content. if (Stream.SkipBlock()) return error("Invalid record"); ReportWarning(DiagnosticHandler, "Unrecognized block found"); break; case bitc::CONSTANTS_BLOCK_ID: if (std::error_code EC = parseConstants()) return EC; NextValueNo = ValueList.size(); break; case bitc::VALUE_SYMTAB_BLOCK_ID: if (std::error_code EC = parseValueSymbolTable()) return EC; break; case bitc::METADATA_ATTACHMENT_ID: if (std::error_code EC = parseMetadataAttachment(*F)) return EC; break; case bitc::METADATA_BLOCK_ID: if (std::error_code EC = parseMetadata()) return EC; break; case bitc::USELIST_BLOCK_ID: if (std::error_code EC = parseUseLists()) return EC; break; } continue; case BitstreamEntry::Record: // The interesting case. break; } // Read a record. Record.clear(); Instruction *I = nullptr; unsigned BitCode = Stream.readRecord(Entry.ID, Record); switch (BitCode) { default: // Default behavior: reject return error("Invalid value"); case bitc::FUNC_CODE_DECLAREBLOCKS: { // DECLAREBLOCKS: [nblocks] if (Record.size() < 1 || Record[0] == 0) return error("Invalid record"); // Create all the basic blocks for the function. FunctionBBs.resize(Record[0]); // See if anything took the address of blocks in this function. auto BBFRI = BasicBlockFwdRefs.find(F); if (BBFRI == BasicBlockFwdRefs.end()) { for (unsigned i = 0, e = FunctionBBs.size(); i != e; ++i) FunctionBBs[i] = BasicBlock::Create(Context, "", F); } else { auto &BBRefs = BBFRI->second; // Check for invalid basic block references. if (BBRefs.size() > FunctionBBs.size()) return error("Invalid ID"); assert(!BBRefs.empty() && "Unexpected empty array"); assert(!BBRefs.front() && "Invalid reference to entry block"); for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E; ++I) if (I < RE && BBRefs[I]) { BBRefs[I]->insertInto(F); FunctionBBs[I] = BBRefs[I]; } else { FunctionBBs[I] = BasicBlock::Create(Context, "", F); } // Erase from the table. BasicBlockFwdRefs.erase(BBFRI); } CurBB = FunctionBBs[0]; continue; } case bitc::FUNC_CODE_DEBUG_LOC_AGAIN: // DEBUG_LOC_AGAIN // This record indicates that the last instruction is at the same // location as the previous instruction with a location. 
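// DEBUG_LOC_AGAIN carries no operands; it simply re-applies LastLoc, the
// location cached by the most recent FUNC_CODE_DEBUG_LOC record.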
I = getLastInstruction(); if (!I) return error("Invalid record"); I->setDebugLoc(LastLoc); I = nullptr; continue; case bitc::FUNC_CODE_DEBUG_LOC: { // DEBUG_LOC: [line, col, scope, ia] I = getLastInstruction(); if (!I || Record.size() < 4) return error("Invalid record"); unsigned Line = Record[0], Col = Record[1]; unsigned ScopeID = Record[2], IAID = Record[3]; MDNode *Scope = nullptr, *IA = nullptr; if (ScopeID) Scope = cast<MDNode>(MDValueList.getValueFwdRef(ScopeID-1)); if (IAID) IA = cast<MDNode>(MDValueList.getValueFwdRef(IAID-1)); LastLoc = DebugLoc::get(Line, Col, Scope, IA); I->setDebugLoc(LastLoc); I = nullptr; continue; } case bitc::FUNC_CODE_INST_BINOP: { // BINOP: [opval, ty, opval, opcode] unsigned OpNum = 0; Value *LHS, *RHS; if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) || OpNum+1 > Record.size()) return error("Invalid record"); int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType()); if (Opc == -1) return error("Invalid record"); I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); InstructionList.push_back(I); if (OpNum < Record.size()) { if (Opc == Instruction::Add || Opc == Instruction::Sub || Opc == Instruction::Mul || Opc == Instruction::Shl) { if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP)) cast<BinaryOperator>(I)->setHasNoSignedWrap(true); if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true); } else if (Opc == Instruction::SDiv || Opc == Instruction::UDiv || Opc == Instruction::LShr || Opc == Instruction::AShr) { if (Record[OpNum] & (1 << bitc::PEO_EXACT)) cast<BinaryOperator>(I)->setIsExact(true); } else if (isa<FPMathOperator>(I)) { FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]); if (FMF.any()) I->setFastMathFlags(FMF); } } break; } case bitc::FUNC_CODE_INST_CAST: { // CAST: [opval, opty, destty, castopc] unsigned OpNum = 0; Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op) || OpNum+2 != Record.size()) return error("Invalid record"); Type *ResTy = getTypeByID(Record[OpNum]); int Opc = getDecodedCastOpcode(Record[OpNum + 1]); if (Opc == -1 || !ResTy) return error("Invalid record"); Instruction *Temp = nullptr; if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) { if (Temp) { InstructionList.push_back(Temp); CurBB->getInstList().push_back(Temp); } } else { I = CastInst::Create((Instruction::CastOps)Opc, Op, ResTy); } InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD: case bitc::FUNC_CODE_INST_GEP_OLD: case bitc::FUNC_CODE_INST_GEP: { // GEP: type, [n x operands] unsigned OpNum = 0; Type *Ty; bool InBounds; if (BitCode == bitc::FUNC_CODE_INST_GEP) { InBounds = Record[OpNum++]; Ty = getTypeByID(Record[OpNum++]); } else { InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD; Ty = nullptr; } Value *BasePtr; if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr)) return error("Invalid record"); if (!Ty) Ty = cast<SequentialType>(BasePtr->getType()->getScalarType()) ->getElementType(); else if (Ty != cast<SequentialType>(BasePtr->getType()->getScalarType()) ->getElementType()) return error( "Explicit gep type does not match pointee type of pointer operand"); SmallVector<Value*, 16> GEPIdx; while (OpNum != Record.size()) { Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op)) return error("Invalid record"); GEPIdx.push_back(Op); } I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx); InstructionList.push_back(I); if (InBounds) 
cast<GetElementPtrInst>(I)->setIsInBounds(true); break; } case bitc::FUNC_CODE_INST_EXTRACTVAL: { // EXTRACTVAL: [opty, opval, n x indices] unsigned OpNum = 0; Value *Agg; if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) return error("Invalid record"); unsigned RecSize = Record.size(); if (OpNum == RecSize) return error("EXTRACTVAL: Invalid instruction with 0 indices"); SmallVector<unsigned, 4> EXTRACTVALIdx; Type *CurTy = Agg->getType(); for (; OpNum != RecSize; ++OpNum) { bool IsArray = CurTy->isArrayTy(); bool IsStruct = CurTy->isStructTy(); uint64_t Index = Record[OpNum]; if (!IsStruct && !IsArray) return error("EXTRACTVAL: Invalid type"); if ((unsigned)Index != Index) return error("Invalid value"); if (IsStruct && Index >= CurTy->subtypes().size()) return error("EXTRACTVAL: Invalid struct index"); if (IsArray && Index >= CurTy->getArrayNumElements()) return error("EXTRACTVAL: Invalid array index"); EXTRACTVALIdx.push_back((unsigned)Index); if (IsStruct) CurTy = CurTy->subtypes()[Index]; else CurTy = CurTy->subtypes()[0]; } I = ExtractValueInst::Create(Agg, EXTRACTVALIdx); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_INSERTVAL: { // INSERTVAL: [opty, opval, opty, opval, n x indices] unsigned OpNum = 0; Value *Agg; if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) return error("Invalid record"); Value *Val; if (getValueTypePair(Record, OpNum, NextValueNo, Val)) return error("Invalid record"); unsigned RecSize = Record.size(); if (OpNum == RecSize) return error("INSERTVAL: Invalid instruction with 0 indices"); SmallVector<unsigned, 4> INSERTVALIdx; Type *CurTy = Agg->getType(); for (; OpNum != RecSize; ++OpNum) { bool IsArray = CurTy->isArrayTy(); bool IsStruct = CurTy->isStructTy(); uint64_t Index = Record[OpNum]; if (!IsStruct && !IsArray) return error("INSERTVAL: Invalid type"); if ((unsigned)Index != Index) return error("Invalid value"); if (IsStruct && Index >= CurTy->subtypes().size()) return error("INSERTVAL: Invalid struct index"); if (IsArray && Index >= CurTy->getArrayNumElements()) return error("INSERTVAL: Invalid array index"); INSERTVALIdx.push_back((unsigned)Index); if (IsStruct) CurTy = CurTy->subtypes()[Index]; else CurTy = CurTy->subtypes()[0]; } if (CurTy != Val->getType()) return error("Inserted value type doesn't match aggregate type"); I = InsertValueInst::Create(Agg, Val, INSERTVALIdx); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_SELECT: { // SELECT: [opval, ty, opval, opval] // obsolete form of select // handles select i1 ... 
in old bitcode unsigned OpNum = 0; Value *TrueVal, *FalseVal, *Cond; if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond)) return error("Invalid record"); I = SelectInst::Create(Cond, TrueVal, FalseVal); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred] // new form of select // handles select i1 or select [N x i1] unsigned OpNum = 0; Value *TrueVal, *FalseVal, *Cond; if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || getValueTypePair(Record, OpNum, NextValueNo, Cond)) return error("Invalid record"); // select condition can be either i1 or [N x i1] if (VectorType* vector_type = dyn_cast<VectorType>(Cond->getType())) { // expect <n x i1> if (vector_type->getElementType() != Type::getInt1Ty(Context)) return error("Invalid type for value"); } else { // expect i1 if (Cond->getType() != Type::getInt1Ty(Context)) return error("Invalid type for value"); } I = SelectInst::Create(Cond, TrueVal, FalseVal); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval] unsigned OpNum = 0; Value *Vec, *Idx; if (getValueTypePair(Record, OpNum, NextValueNo, Vec) || getValueTypePair(Record, OpNum, NextValueNo, Idx)) return error("Invalid record"); if (!Vec->getType()->isVectorTy()) return error("Invalid type for value"); I = ExtractElementInst::Create(Vec, Idx); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval] unsigned OpNum = 0; Value *Vec, *Elt, *Idx; if (getValueTypePair(Record, OpNum, NextValueNo, Vec)) return error("Invalid record"); if (!Vec->getType()->isVectorTy()) return error("Invalid type for value"); if (popValue(Record, OpNum, NextValueNo, cast<VectorType>(Vec->getType())->getElementType(), Elt) || getValueTypePair(Record, OpNum, NextValueNo, Idx)) return error("Invalid record"); I = InsertElementInst::Create(Vec, Elt, Idx); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval] unsigned OpNum = 0; Value *Vec1, *Vec2, *Mask; if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) || popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2)) return error("Invalid record"); if (getValueTypePair(Record, OpNum, NextValueNo, Mask)) return error("Invalid record"); if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy()) return error("Invalid type for value"); I = new ShuffleVectorInst(Vec1, Vec2, Mask); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_CMP: // CMP: [opty, opval, opval, pred] // Old form of ICmp/FCmp returning bool // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were // both legal on vectors but had different behaviour. 
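// Both forms share the handling below; whether an ICmp or an FCmp is created
// depends only on the operand type.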
case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred] // FCmp/ICmp returning bool or vector of bool unsigned OpNum = 0; Value *LHS, *RHS; if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS)) return error("Invalid record"); unsigned PredVal = Record[OpNum]; bool IsFP = LHS->getType()->isFPOrFPVectorTy(); FastMathFlags FMF; if (IsFP && Record.size() > OpNum+1) FMF = getDecodedFastMathFlags(Record[++OpNum]); if (OpNum+1 != Record.size()) return error("Invalid record"); if (LHS->getType()->isFPOrFPVectorTy()) I = new FCmpInst((FCmpInst::Predicate)PredVal, LHS, RHS); else I = new ICmpInst((ICmpInst::Predicate)PredVal, LHS, RHS); if (FMF.any()) I->setFastMathFlags(FMF); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>] { unsigned Size = Record.size(); if (Size == 0) { I = ReturnInst::Create(Context); InstructionList.push_back(I); break; } unsigned OpNum = 0; Value *Op = nullptr; if (getValueTypePair(Record, OpNum, NextValueNo, Op)) return error("Invalid record"); if (OpNum != Record.size()) return error("Invalid record"); I = ReturnInst::Create(Context, Op); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#] if (Record.size() != 1 && Record.size() != 3) return error("Invalid record"); BasicBlock *TrueDest = getBasicBlock(Record[0]); if (!TrueDest) return error("Invalid record"); if (Record.size() == 1) { I = BranchInst::Create(TrueDest); InstructionList.push_back(I); } else { BasicBlock *FalseDest = getBasicBlock(Record[1]); Value *Cond = getValue(Record, 2, NextValueNo, Type::getInt1Ty(Context)); if (!FalseDest || !Cond) return error("Invalid record"); I = BranchInst::Create(TrueDest, FalseDest, Cond); InstructionList.push_back(I); } break; } case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...] // Check magic if ((Record[0] >> 16) == SWITCH_INST_MAGIC) { // "New" SwitchInst format with case ranges. The changes to write this // format were reverted but we still recognize bitcode that uses it. // Hopefully someday we will have support for case ranges and can use // this format again. Type *OpTy = getTypeByID(Record[1]); unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth(); Value *Cond = getValue(Record, 2, NextValueNo, OpTy); BasicBlock *Default = getBasicBlock(Record[3]); if (!OpTy || !Cond || !Default) return error("Invalid record"); unsigned NumCases = Record[4]; SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); InstructionList.push_back(SI); unsigned CurIdx = 5; for (unsigned i = 0; i != NumCases; ++i) { SmallVector<ConstantInt*, 1> CaseVals; unsigned NumItems = Record[CurIdx++]; for (unsigned ci = 0; ci != NumItems; ++ci) { bool isSingleNumber = Record[CurIdx++]; APInt Low; unsigned ActiveWords = 1; if (ValueBitWidth > 64) ActiveWords = Record[CurIdx++]; Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth); CurIdx += ActiveWords; if (!isSingleNumber) { ActiveWords = 1; if (ValueBitWidth > 64) ActiveWords = Record[CurIdx++]; APInt High = readWideAPInt( makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth); CurIdx += ActiveWords; // FIXME: It is not clear whether values in the range should be // compared as signed or unsigned values. The partially // implemented changes that used this format in the past used // unsigned comparisons. 
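// Expand the inclusive [Low, High] range into individual case values with an
// unsigned walk, e.g. a 7..9 range becomes addCase(7), addCase(8), addCase(9).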
for ( ; Low.ule(High); ++Low) CaseVals.push_back(ConstantInt::get(Context, Low)); } else CaseVals.push_back(ConstantInt::get(Context, Low)); } BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]); for (SmallVector<ConstantInt*, 1>::iterator cvi = CaseVals.begin(), cve = CaseVals.end(); cvi != cve; ++cvi) SI->addCase(*cvi, DestBB); } I = SI; break; } // Old SwitchInst format without case ranges. if (Record.size() < 3 || (Record.size() & 1) == 0) return error("Invalid record"); Type *OpTy = getTypeByID(Record[0]); Value *Cond = getValue(Record, 1, NextValueNo, OpTy); BasicBlock *Default = getBasicBlock(Record[2]); if (!OpTy || !Cond || !Default) return error("Invalid record"); unsigned NumCases = (Record.size()-3)/2; SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); InstructionList.push_back(SI); for (unsigned i = 0, e = NumCases; i != e; ++i) { ConstantInt *CaseVal = dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy)); BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]); if (!CaseVal || !DestBB) { delete SI; return error("Invalid record"); } SI->addCase(CaseVal, DestBB); } I = SI; break; } case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...] if (Record.size() < 2) return error("Invalid record"); Type *OpTy = getTypeByID(Record[0]); Value *Address = getValue(Record, 1, NextValueNo, OpTy); if (!OpTy || !Address) return error("Invalid record"); unsigned NumDests = Record.size()-2; IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests); InstructionList.push_back(IBI); for (unsigned i = 0, e = NumDests; i != e; ++i) { if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) { IBI->addDestination(DestBB); } else { delete IBI; return error("Invalid record"); } } I = IBI; break; } case bitc::FUNC_CODE_INST_INVOKE: { // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...] if (Record.size() < 4) return error("Invalid record"); unsigned OpNum = 0; AttributeSet PAL = getAttributes(Record[OpNum++]); unsigned CCInfo = Record[OpNum++]; BasicBlock *NormalBB = getBasicBlock(Record[OpNum++]); BasicBlock *UnwindBB = getBasicBlock(Record[OpNum++]); FunctionType *FTy = nullptr; if (CCInfo >> 13 & 1 && !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) return error("Explicit invoke type is not a function type"); Value *Callee; if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) return error("Invalid record"); PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType()); if (!CalleeTy) return error("Callee is not a pointer"); if (!FTy) { FTy = dyn_cast<FunctionType>(CalleeTy->getElementType()); if (!FTy) return error("Callee is not of pointer to function type"); } else if (CalleeTy->getElementType() != FTy) return error("Explicit invoke type does not match pointee type of " "callee operand"); if (Record.size() < FTy->getNumParams() + OpNum) return error("Insufficient operands to call"); SmallVector<Value*, 16> Ops; for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { Ops.push_back(getValue(Record, OpNum, NextValueNo, FTy->getParamType(i))); if (!Ops.back()) return error("Invalid record"); } if (!FTy->isVarArg()) { if (Record.size() != OpNum) return error("Invalid record"); } else { // Read type/value pairs for varargs params. 
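// Any operands beyond the fixed parameter list are the variadic arguments;
// each is read with getValueTypePair() because its type is not implied by the
// callee signature.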
while (OpNum != Record.size()) { Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op)) return error("Invalid record"); Ops.push_back(Op); } } I = InvokeInst::Create(Callee, NormalBB, UnwindBB, Ops); InstructionList.push_back(I); cast<InvokeInst>(I) ->setCallingConv(static_cast<CallingConv::ID>(~(1U << 13) & CCInfo)); cast<InvokeInst>(I)->setAttributes(PAL); break; } case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval] unsigned Idx = 0; Value *Val = nullptr; if (getValueTypePair(Record, Idx, NextValueNo, Val)) return error("Invalid record"); I = ResumeInst::Create(Val); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE I = new UnreachableInst(Context); InstructionList.push_back(I); break; case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...] if (Record.size() < 1 || ((Record.size()-1)&1)) return error("Invalid record"); Type *Ty = getTypeByID(Record[0]); if (!Ty) return error("Invalid record"); PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2); InstructionList.push_back(PN); for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) { Value *V; // With the new function encoding, it is possible that operands have // negative IDs (for forward references). Use a signed VBR // representation to keep the encoding small. if (UseRelativeIDs) V = getValueSigned(Record, 1+i, NextValueNo, Ty); else V = getValue(Record, 1+i, NextValueNo, Ty); BasicBlock *BB = getBasicBlock(Record[2+i]); if (!V || !BB) return error("Invalid record"); PN->addIncoming(V, BB); } I = PN; break; } case bitc::FUNC_CODE_INST_LANDINGPAD: case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: { // LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?] unsigned Idx = 0; if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) { if (Record.size() < 3) return error("Invalid record"); } else { assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD); if (Record.size() < 4) return error("Invalid record"); } Type *Ty = getTypeByID(Record[Idx++]); if (!Ty) return error("Invalid record"); if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) { Value *PersFn = nullptr; if (getValueTypePair(Record, Idx, NextValueNo, PersFn)) return error("Invalid record"); if (!F->hasPersonalityFn()) F->setPersonalityFn(cast<Constant>(PersFn)); else if (F->getPersonalityFn() != cast<Constant>(PersFn)) return error("Personality function mismatch"); } bool IsCleanup = !!Record[Idx++]; unsigned NumClauses = Record[Idx++]; LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses); LP->setCleanup(IsCleanup); for (unsigned J = 0; J != NumClauses; ++J) { LandingPadInst::ClauseType CT = LandingPadInst::ClauseType(Record[Idx++]); (void)CT; Value *Val; if (getValueTypePair(Record, Idx, NextValueNo, Val)) { delete LP; return error("Invalid record"); } assert((CT != LandingPadInst::Catch || !isa<ArrayType>(Val->getType())) && "Catch clause has a invalid type!"); assert((CT != LandingPadInst::Filter || isa<ArrayType>(Val->getType())) && "Filter clause has invalid type!"); LP->addClause(cast<Constant>(Val)); } I = LP; InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align] if (Record.size() != 4) return error("Invalid record"); uint64_t AlignRecord = Record[3]; const uint64_t InAllocaMask = uint64_t(1) << 5; const uint64_t ExplicitTypeMask = uint64_t(1) << 6; const uint64_t FlagMask = InAllocaMask | ExplicitTypeMask; bool InAlloca = AlignRecord & InAllocaMask; Type *Ty = getTypeByID(Record[0]); if ((AlignRecord & ExplicitTypeMask) == 0) { auto *PTy = 
dyn_cast_or_null<PointerType>(Ty); if (!PTy) return error("Old-style alloca with a non-pointer type"); Ty = PTy->getElementType(); } Type *OpTy = getTypeByID(Record[1]); Value *Size = getFnValueByID(Record[2], OpTy); unsigned Align; if (std::error_code EC = parseAlignmentValue(AlignRecord & ~FlagMask, Align)) { return EC; } if (!Ty || !Size) return error("Invalid record"); AllocaInst *AI = new AllocaInst(Ty, Size, Align); AI->setUsedWithInAlloca(InAlloca); I = AI; InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol] unsigned OpNum = 0; Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op) || (OpNum + 2 != Record.size() && OpNum + 3 != Record.size())) return error("Invalid record"); Type *Ty = nullptr; if (OpNum + 3 == Record.size()) Ty = getTypeByID(Record[OpNum++]); if (std::error_code EC = typeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType())) return EC; if (!Ty) Ty = cast<PointerType>(Op->getType())->getElementType(); unsigned Align; if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align)) return EC; I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_LOADATOMIC: { // LOADATOMIC: [opty, op, align, vol, ordering, synchscope] unsigned OpNum = 0; Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op) || (OpNum + 4 != Record.size() && OpNum + 5 != Record.size())) return error("Invalid record"); Type *Ty = nullptr; if (OpNum + 5 == Record.size()) Ty = getTypeByID(Record[OpNum++]); if (std::error_code EC = typeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType())) return EC; if (!Ty) Ty = cast<PointerType>(Op->getType())->getElementType(); AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); if (Ordering == NotAtomic || Ordering == Release || Ordering == AcquireRelease) return error("Invalid record"); if (Ordering != NotAtomic && Record[OpNum] == 0) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); unsigned Align; if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align)) return EC; I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SynchScope); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_STORE: case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol] unsigned OpNum = 0; Value *Val, *Ptr; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || (BitCode == bitc::FUNC_CODE_INST_STORE ? getValueTypePair(Record, OpNum, NextValueNo, Val) : popValue(Record, OpNum, NextValueNo, cast<PointerType>(Ptr->getType())->getElementType(), Val)) || OpNum + 2 != Record.size()) return error("Invalid record"); if (std::error_code EC = typeCheckLoadStoreInst( DiagnosticHandler, Val->getType(), Ptr->getType())) return EC; unsigned Align; if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align)) return EC; I = new StoreInst(Val, Ptr, Record[OpNum+1], Align); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_STOREATOMIC: case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: { // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope] unsigned OpNum = 0; Value *Val, *Ptr; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || (BitCode == bitc::FUNC_CODE_INST_STOREATOMIC ? 
getValueTypePair(Record, OpNum, NextValueNo, Val) : popValue(Record, OpNum, NextValueNo, cast<PointerType>(Ptr->getType())->getElementType(), Val)) || OpNum + 4 != Record.size()) return error("Invalid record"); if (std::error_code EC = typeCheckLoadStoreInst( DiagnosticHandler, Val->getType(), Ptr->getType())) return EC; AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); if (Ordering == NotAtomic || Ordering == Acquire || Ordering == AcquireRelease) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); if (Ordering != NotAtomic && Record[OpNum] == 0) return error("Invalid record"); unsigned Align; if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align)) return EC; I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SynchScope); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_CMPXCHG_OLD: case bitc::FUNC_CODE_INST_CMPXCHG: { // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope, // failureordering?, isweak?] unsigned OpNum = 0; Value *Ptr, *Cmp, *New; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || (BitCode == bitc::FUNC_CODE_INST_CMPXCHG ? getValueTypePair(Record, OpNum, NextValueNo, Cmp) : popValue(Record, OpNum, NextValueNo, cast<PointerType>(Ptr->getType())->getElementType(), Cmp)) || popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) || Record.size() < OpNum + 3 || Record.size() > OpNum + 5) return error("Invalid record"); AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]); if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]); if (std::error_code EC = typeCheckLoadStoreInst( DiagnosticHandler, Cmp->getType(), Ptr->getType())) return EC; AtomicOrdering FailureOrdering; if (Record.size() < 7) FailureOrdering = AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering); else FailureOrdering = getDecodedOrdering(Record[OpNum + 3]); I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering, SynchScope); cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]); if (Record.size() < 8) { // Before weak cmpxchgs existed, the instruction simply returned the // value loaded from memory, so bitcode files from that era will be // expecting the first component of a modern cmpxchg. 
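// Emit the cmpxchg itself now and make I an extractvalue of field 0, so the
// value registered for this record matches the old single-result form.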
CurBB->getInstList().push_back(I); I = ExtractValueInst::Create(I, 0); } else { cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]); } InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_ATOMICRMW: { // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, synchscope] unsigned OpNum = 0; Value *Ptr, *Val; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || popValue(Record, OpNum, NextValueNo, cast<PointerType>(Ptr->getType())->getElementType(), Val) || OpNum+4 != Record.size()) return error("Invalid record"); AtomicRMWInst::BinOp Operation = getDecodedRMWOperation(Record[OpNum]); if (Operation < AtomicRMWInst::FIRST_BINOP || Operation > AtomicRMWInst::LAST_BINOP) return error("Invalid record"); AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); if (Ordering == NotAtomic || Ordering == Unordered) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope); cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope] if (2 != Record.size()) return error("Invalid record"); AtomicOrdering Ordering = getDecodedOrdering(Record[0]); if (Ordering == NotAtomic || Ordering == Unordered || Ordering == Monotonic) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]); I = new FenceInst(Context, Ordering, SynchScope); InstructionList.push_back(I); break; } case bitc::FUNC_CODE_INST_CALL: { // CALL: [paramattrs, cc, fnty, fnid, arg0, arg1...] if (Record.size() < 3) return error("Invalid record"); unsigned OpNum = 0; AttributeSet PAL = getAttributes(Record[OpNum++]); unsigned CCInfo = Record[OpNum++]; FunctionType *FTy = nullptr; if (CCInfo >> 15 & 1 && !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) return error("Explicit call type is not a function type"); Value *Callee; if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) return error("Invalid record"); PointerType *OpTy = dyn_cast<PointerType>(Callee->getType()); if (!OpTy) return error("Callee is not a pointer type"); if (!FTy) { FTy = dyn_cast<FunctionType>(OpTy->getElementType()); if (!FTy) return error("Callee is not of pointer to function type"); } else if (OpTy->getElementType() != FTy) return error("Explicit call type does not match pointee type of " "callee operand"); if (Record.size() < FTy->getNumParams() + OpNum) return error("Insufficient operands to call"); SmallVector<Value*, 16> Args; // Read the fixed params. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { if (FTy->getParamType(i)->isLabelTy()) Args.push_back(getBasicBlock(Record[OpNum])); else Args.push_back(getValue(Record, OpNum, NextValueNo, FTy->getParamType(i))); if (!Args.back()) return error("Invalid record"); } // Read type/value pairs for varargs params. 
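// With a fixed signature the record must end exactly at the last declared
// parameter; for varargs callees the remaining operands are the extra
// arguments.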
if (!FTy->isVarArg()) { if (OpNum != Record.size()) return error("Invalid record"); } else { while (OpNum != Record.size()) { Value *Op; if (getValueTypePair(Record, OpNum, NextValueNo, Op)) return error("Invalid record"); Args.push_back(Op); } } I = CallInst::Create(FTy, Callee, Args); InstructionList.push_back(I); cast<CallInst>(I)->setCallingConv( static_cast<CallingConv::ID>((~(1U << 14) & CCInfo) >> 1)); CallInst::TailCallKind TCK = CallInst::TCK_None; if (CCInfo & 1) TCK = CallInst::TCK_Tail; if (CCInfo & (1 << 14)) TCK = CallInst::TCK_MustTail; cast<CallInst>(I)->setTailCallKind(TCK); cast<CallInst>(I)->setAttributes(PAL); break; } case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty] if (Record.size() < 3) return error("Invalid record"); Type *OpTy = getTypeByID(Record[0]); Value *Op = getValue(Record, 1, NextValueNo, OpTy); Type *ResTy = getTypeByID(Record[2]); if (!OpTy || !Op || !ResTy) return error("Invalid record"); I = new VAArgInst(Op, ResTy); InstructionList.push_back(I); break; } } // Add instruction to end of current BB. If there is no current BB, reject // this file. if (!CurBB) { delete I; return error("Invalid instruction with no BB"); } CurBB->getInstList().push_back(I); // If this was a terminator instruction, move to the next block. if (isa<TerminatorInst>(I)) { ++CurBBNo; CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr; } // Non-void values get registered in the value table for future use. if (I && !I->getType()->isVoidTy()) ValueList.assignValue(I, NextValueNo++); } OutOfRecordLoop: // Check the function list for unresolved values. if (Argument *A = dyn_cast<Argument>(ValueList.back())) { if (!A->getParent()) { // We found at least one unresolved value. Nuke them all to avoid leaks. for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){ if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) { A->replaceAllUsesWith(UndefValue::get(A->getType())); delete A; } } return error("Never resolved value found in function"); } } // FIXME: Check for unresolved forward-declared metadata references // and clean up leaks. // Trim the value list down to the size it was before we parsed this function. ValueList.shrinkTo(ModuleValueListSize); MDValueList.shrinkTo(ModuleMDValueListSize); std::vector<BasicBlock*>().swap(FunctionBBs); return std::error_code(); } /// Find the function body in the bitcode stream std::error_code BitcodeReader::findFunctionInStream( Function *F, DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) { while (DeferredFunctionInfoIterator->second == 0) { if (Stream.AtEndOfStream()) return error("Could not find function in stream"); // ParseModule will parse the next body in the stream and set its // position in the DeferredFunctionInfo map. if (std::error_code EC = parseModule(true)) return EC; } return std::error_code(); } //===----------------------------------------------------------------------===// // GVMaterializer implementation //===----------------------------------------------------------------------===// // HLSL Change: remove unused function // void BitcodeReader::releaseBuffer() { Buffer.release(); } std::error_code BitcodeReader::materialize(GlobalValue *GV) { if (std::error_code EC = materializeMetadata()) return EC; Function *F = dyn_cast<Function>(GV); // If it's not a function or is already material, ignore the request. 
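// Only functions that still have a deferred body recorded in
// DeferredFunctionInfo need work here; other globals were parsed eagerly.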
if (!F || !F->isMaterializable()) return std::error_code(); DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F); assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!"); // If its position is recorded as 0, its body is somewhere in the stream // but we haven't seen it yet. if (DFII->second == 0) if (std::error_code EC = findFunctionInStream(F, DFII)) return EC; // Move the bit stream to the saved position of the deferred function body. Stream.JumpToBit(DFII->second); if (std::error_code EC = parseFunctionBody(F)) return EC; F->setIsMaterializable(false); if (StripDebugInfo) stripDebugInfo(*F); // Upgrade any old intrinsic calls in the function. for (auto &I : UpgradedIntrinsics) { for (auto UI = I.first->user_begin(), UE = I.first->user_end(); UI != UE;) { User *U = *UI; ++UI; if (CallInst *CI = dyn_cast<CallInst>(U)) UpgradeIntrinsicCall(CI, I.second); } } // Bring in any functions that this function forward-referenced via // blockaddresses. return materializeForwardReferencedFunctions(); } bool BitcodeReader::isDematerializable(const GlobalValue *GV) const { const Function *F = dyn_cast<Function>(GV); if (!F || F->isDeclaration()) return false; // Dematerializing F would leave dangling references that wouldn't be // reconnected on re-materialization. if (BlockAddressesTaken.count(F)) return false; return DeferredFunctionInfo.count(const_cast<Function*>(F)); } void BitcodeReader::dematerialize(GlobalValue *GV) { Function *F = dyn_cast<Function>(GV); // If this function isn't dematerializable, this is a noop. if (!F || !isDematerializable(F)) return; assert(DeferredFunctionInfo.count(F) && "No info to read function later?"); // Just forget the function body, we can remat it later. F->dropAllReferences(); F->setIsMaterializable(true); } std::error_code BitcodeReader::materializeModule(Module *M) { assert(M == TheModule && "Can only Materialize the Module this BitcodeReader is attached to."); if (std::error_code EC = materializeMetadata()) return EC; // Promise to materialize all forward references. WillMaterializeAllForwardRefs = true; // Iterate over the module, deserializing any functions that are still on // disk. for (Module::iterator F = TheModule->begin(), E = TheModule->end(); F != E; ++F) { if (std::error_code EC = materialize(F)) return EC; } // At this point, if there are any function bodies, the current bit is // pointing to the END_BLOCK record after them. Now make sure the rest // of the bits in the module have been read. if (NextUnreadBit) parseModule(true); // Check that all block address forward references got resolved (as we // promised above). if (!BasicBlockFwdRefs.empty()) return error("Never resolved function from blockaddress"); // Upgrade any intrinsic calls that slipped through (should not happen!) and // delete the old functions to clean up. We can't do this unless the entire // module is materialized because there could always be another function body // with calls to the old function. 
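// Rewrite any remaining calls to the upgraded intrinsic, redirect other uses
// with replaceAllUsesWith, and erase the stale declaration.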
for (auto &I : UpgradedIntrinsics) { for (auto *U : I.first->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) UpgradeIntrinsicCall(CI, I.second); } if (!I.first->use_empty()) I.first->replaceAllUsesWith(I.second); I.first->eraseFromParent(); } UpgradedIntrinsics.clear(); for (unsigned I = 0, E = InstsWithTBAATag.size(); I < E; I++) UpgradeInstWithTBAATag(InstsWithTBAATag[I]); UpgradeDebugInfo(*M); // HLSL Change Starts if (ShouldTrackBitstreamUsage && !Tracker.isDense((uint64_t)(Buffer->getBufferSize()) * 8)) { ReportWarning(DiagnosticHandler, "Unused bits in buffer."); } // HLSL Change Ends return std::error_code(); } std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const { return IdentifiedStructTypes; } std::error_code BitcodeReader::initStream(std::unique_ptr<DataStreamer> Streamer) { if (Streamer) return initLazyStream(std::move(Streamer)); return initStreamFromBuffer(); } std::error_code BitcodeReader::initStreamFromBuffer() { const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart(); const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize(); if (Buffer->getBufferSize() & 3) return error("Invalid bitcode size"); // HLSL Change - bitcode size is the problem, not the signature per se // If we have a wrapper header, parse it and ignore the non-bc file contents. // The magic number is 0x0B17C0DE stored in little endian. if (isBitcodeWrapper(BufPtr, BufEnd)) if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true)) return error("Invalid bitcode wrapper header"); StreamFile.reset(new BitstreamReader(BufPtr, BufEnd)); if (ShouldTrackBitstreamUsage) StreamFile->Tracker = &Tracker; // HLSL Change Stream.init(&*StreamFile); return std::error_code(); } std::error_code BitcodeReader::initLazyStream(std::unique_ptr<DataStreamer> Streamer) { // Check and strip off the bitcode wrapper; BitstreamReader expects never to // see it. 
auto OwnedBytes = llvm::make_unique<StreamingMemoryObject>(std::move(Streamer)); StreamingMemoryObject &Bytes = *OwnedBytes; StreamFile = llvm::make_unique<BitstreamReader>(std::move(OwnedBytes)); Stream.init(&*StreamFile); unsigned char buf[16]; if (Bytes.readBytes(buf, 16, 0) != 16) return error("Invalid bitcode signature"); if (!isBitcode(buf, buf + 16)) return error("Invalid bitcode signature"); if (isBitcodeWrapper(buf, buf + 4)) { const unsigned char *bitcodeStart = buf; const unsigned char *bitcodeEnd = buf + 16; SkipBitcodeWrapperHeader(bitcodeStart, bitcodeEnd, false); Bytes.dropLeadingBytes(bitcodeStart - buf); Bytes.setKnownObjectSize(bitcodeEnd - bitcodeStart); } return std::error_code(); } namespace { class BitcodeErrorCategoryType : public std::error_category { const char *name() const LLVM_NOEXCEPT override { return "llvm.bitcode"; } std::string message(int IE) const override { BitcodeError E = static_cast<BitcodeError>(IE); switch (E) { case BitcodeError::InvalidBitcodeSignature: return "Invalid bitcode signature"; case BitcodeError::CorruptedBitcode: return "Corrupted bitcode"; } llvm_unreachable("Unknown error type!"); } }; } static BitcodeErrorCategoryType g_ErrorCategory; // HLSL Change - not a ManagedStatic const std::error_category &llvm::BitcodeErrorCategory() { return g_ErrorCategory; // HLSL Change - simple global } //===----------------------------------------------------------------------===// // External interface //===----------------------------------------------------------------------===// static ErrorOr<std::unique_ptr<Module>> getBitcodeModuleImpl(std::unique_ptr<DataStreamer> Streamer, StringRef Name, std::unique_ptr<BitcodeReader> RPtr, LLVMContext &Context, // HLSL Change: unique_ptr bool MaterializeAll, bool ShouldLazyLoadMetadata) { std::unique_ptr<Module> M = make_unique<Module>(Name, Context); // HLSL Change Begin: Transfer ownership of R to M, but keep a raw pointer BitcodeReader* R = RPtr.release(); M->setMaterializer(R); // HLSL Change End // Delay parsing Metadata if ShouldLazyLoadMetadata is true. if (std::error_code EC = R->parseBitcodeInto(std::move(Streamer), M.get(), ShouldLazyLoadMetadata)) return EC; // HLSL Change: Correct memory management of BitcodeReader.buffer if (MaterializeAll) { // Read in the entire module, and destroy the BitcodeReader. if (std::error_code EC = M->materializeAllPermanently()) return EC; // HLSL Change: Correct memory management of BitcodeReader.buffer } else { // Resolve forward references from blockaddresses. if (std::error_code EC = R->materializeForwardReferencedFunctions()) return EC; // HLSL Change: Correct memory management of BitcodeReader.buffer } return std::move(M); } /// \brief Get a lazy one-at-time loading module from bitcode. /// /// This isn't always used in a lazy context. In particular, it's also used by /// \a parseBitcodeFile(). If this is truly lazy, then we need to eagerly pull /// in forward-referenced functions from block address references. /// /// \param[in] MaterializeAll Set to \c true if we should materialize /// everything. 
static ErrorOr<std::unique_ptr<Module>> getLazyBitcodeModuleImpl(std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, bool MaterializeAll, DiagnosticHandlerFunction DiagnosticHandler, bool ShouldLazyLoadMetadata = false, bool ShouldTrackBitstreamUsage = false) // HLSL Change { // HLSL Change Begin: Proper memory management with unique_ptr // Get the buffer identifier before we transfer the ownership to the bitcode reader, // this is ugly but safe as long as it keeps the buffer, and hence identifier string, alive. const char* BufferIdentifier = Buffer->getBufferIdentifier(); std::unique_ptr<BitcodeReader> R = llvm::make_unique<BitcodeReader>( std::move(Buffer), Context, DiagnosticHandler); if (R) R->ShouldTrackBitstreamUsage = ShouldTrackBitstreamUsage; // HLSL Change ErrorOr<std::unique_ptr<Module>> Ret = getBitcodeModuleImpl(nullptr, BufferIdentifier, std::move(R), Context, MaterializeAll, ShouldLazyLoadMetadata); // HLSL Change End if (!Ret) return Ret; return Ret; } ErrorOr<std::unique_ptr<Module>> llvm::getLazyBitcodeModule( std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler, bool ShouldLazyLoadMetadata, bool ShouldTrackBitstreamUsage) { return getLazyBitcodeModuleImpl(std::move(Buffer), Context, false, DiagnosticHandler, ShouldLazyLoadMetadata, ShouldTrackBitstreamUsage); // HLSL Change } ErrorOr<std::unique_ptr<Module>> llvm::getStreamedBitcodeModule( StringRef Name, std::unique_ptr<DataStreamer> Streamer, LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler) { std::unique_ptr<Module> M = make_unique<Module>(Name, Context); std::unique_ptr<BitcodeReader> R = llvm::make_unique<BitcodeReader>(Context, DiagnosticHandler); // HLSL Change: unique_ptr return getBitcodeModuleImpl(std::move(Streamer), Name, std::move(R), Context, false, // HLSL Change: std::move false); } // HLSL Change Starts struct report_fatal_error_data { report_fatal_error_data(DiagnosticHandlerFunction DH) : DiagnosticHandler(DH) {} DiagnosticHandlerFunction DiagnosticHandler; }; void report_fatal_error_handler(void *user_datam, const std::string &reason, bool gen_crash_diag) { report_fatal_error_data *data = (report_fatal_error_data *)user_datam; BitcodeDiagnosticInfo BDI(std::error_code(EINVAL, std::system_category()), DiagnosticSeverity::DS_Error, reason); data->DiagnosticHandler(BDI); throw std::runtime_error("Invalid bitcode"); } // HLSL Change Ends ErrorOr<std::unique_ptr<Module>> llvm::parseBitcodeFile(MemoryBufferRef Buffer, LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler, bool ShouldTrackBitstreamUsage) // HLSL Change { // HLSL Change Starts - introduce a ScopedFatalErrorHandler to handle // report_fatal_error from readers. report_fatal_error_data data(DiagnosticHandler); ScopedFatalErrorHandler SFE(report_fatal_error_handler, &data); // HLSL Change Ends std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getMemBuffer(Buffer, false); return getLazyBitcodeModuleImpl(std::move(Buf), Context, true, DiagnosticHandler, false, ShouldTrackBitstreamUsage); // HLSL Change // TODO: Restore the use-lists to the in-memory state when the bitcode was // written. We must defer until the Module has been fully materialized. 
} std::string llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer, LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler) { std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getMemBuffer(Buffer, false); auto R = llvm::make_unique<BitcodeReader>(std::move(Buf), Context, // HLSL Change: std::move DiagnosticHandler); ErrorOr<std::string> Triple = R->parseTriple(); if (Triple.getError()) return ""; return Triple.get(); }
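A minimal usage sketch, not part of the repository dump, showing how the lazy entry point defined above is commonly driven; the looked-up function name is illustrative, and the only non-standard call assumed is materializeAllPermanently, which appears in the code above.

// Sketch: lazily open a bitcode module, then pull in one body on demand.
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

static std::unique_ptr<llvm::Module>
openLazily(std::unique_ptr<llvm::MemoryBuffer> Buf, llvm::LLVMContext &Ctx) {
  // Parses globals and prototypes only; function bodies stay in the bitstream.
  auto ModOrErr = llvm::getLazyBitcodeModule(std::move(Buf), Ctx);
  if (ModOrErr.getError())
    return nullptr;                                   // diagnostics already went through the handler
  std::unique_ptr<llvm::Module> M = std::move(ModOrErr.get());
  if (llvm::Function *F = M->getFunction("main"))     // "main" is just an example name
    if (F->materialize())                             // dispatches to BitcodeReader::materialize above
      return nullptr;
  // M->materializeAllPermanently() would instead read every body up front.
  return M;
}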
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/IRReader/IRReader.cpp
//===---- IRReader.cpp - Reader for LLVM IR files -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/IRReader/IRReader.h" // #include "llvm-c/Core.h" // #include "llvm-c/IRReader.h" #include "llvm/AsmParser/Parser.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" #include <system_error> using namespace llvm; namespace llvm { extern bool TimePassesIsEnabled; } static const char *const TimeIRParsingGroupName = "LLVM IR Parsing"; static const char *const TimeIRParsingName = "Parse IR"; static std::unique_ptr<Module> getLazyIRModule(std::unique_ptr<MemoryBuffer> Buffer, SMDiagnostic &Err, LLVMContext &Context) { if (isBitcode((const unsigned char *)Buffer->getBufferStart(), (const unsigned char *)Buffer->getBufferEnd())) { ErrorOr<std::unique_ptr<Module>> ModuleOrErr = getLazyBitcodeModule(std::move(Buffer), Context); if (std::error_code EC = ModuleOrErr.getError()) { Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error, EC.message()); return nullptr; } return std::move(ModuleOrErr.get()); } return parseAssembly(Buffer->getMemBufferRef(), Err, Context); } std::unique_ptr<Module> llvm::getLazyIRFileModule(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context) { ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename); if (std::error_code EC = FileOrErr.getError()) { Err = SMDiagnostic(Filename, SourceMgr::DK_Error, "Could not open input file: " + EC.message()); return nullptr; } return getLazyIRModule(std::move(FileOrErr.get()), Err, Context); } std::unique_ptr<Module> llvm::parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err, LLVMContext &Context) { NamedRegionTimer T(TimeIRParsingName, TimeIRParsingGroupName, TimePassesIsEnabled); if (isBitcode((const unsigned char *)Buffer.getBufferStart(), (const unsigned char *)Buffer.getBufferEnd())) { ErrorOr<std::unique_ptr<Module>> ModuleOrErr = parseBitcodeFile(Buffer, Context); if (std::error_code EC = ModuleOrErr.getError()) { Err = SMDiagnostic(Buffer.getBufferIdentifier(), SourceMgr::DK_Error, EC.message()); return nullptr; } return std::move(ModuleOrErr.get()); } return parseAssembly(Buffer, Err, Context); } std::unique_ptr<Module> llvm::parseIRFile(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context) { ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename); if (std::error_code EC = FileOrErr.getError()) { Err = SMDiagnostic(Filename, SourceMgr::DK_Error, "Could not open input file: " + EC.message()); return nullptr; } return parseIR(FileOrErr.get()->getMemBufferRef(), Err, Context); } #if 0 // HLSL Change Starts //===----------------------------------------------------------------------===// // C API. 
//===----------------------------------------------------------------------===// LLVMBool LLVMParseIRInContext(LLVMContextRef ContextRef, LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM, char **OutMessage) { SMDiagnostic Diag; std::unique_ptr<MemoryBuffer> MB(unwrap(MemBuf)); *OutM = wrap(parseIR(MB->getMemBufferRef(), Diag, *unwrap(ContextRef)).release()); if(!*OutM) { if (OutMessage) { std::string buf; raw_string_ostream os(buf); Diag.print(nullptr, os, false); os.flush(); *OutMessage = _strdup(buf.c_str()); } return 1; } return 0; } #endif // HLSL Change Ends
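A short hypothetical driver, not part of the dump, illustrating the parseIRFile entry point defined above; the input path and program name are placeholders.

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  // parseIRFile auto-detects bitcode vs. textual IR from the buffer contents.
  std::unique_ptr<llvm::Module> M = llvm::parseIRFile("input.ll", Err, Ctx);
  if (!M) {
    Err.print("ir-reader-demo", llvm::errs());   // carries file/line/column info for .ll input
    return 1;
  }
  llvm::outs() << "loaded: " << M->getModuleIdentifier() << "\n";
  return 0;
}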
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/IRReader/CMakeLists.txt
add_llvm_library(LLVMIRReader IRReader.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/IRReader DEPENDS intrinsics_gen )
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/IRReader/LLVMBuild.txt
;===- ./lib/IRReader/LLVMBuild.txt -----------------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = IRReader parent = Libraries required_libraries = AsmParser BitReader Core Support
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/WinAdapter.cpp
//===-- WinAdapter.cpp - Windows Adapter for other platforms ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "assert.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #ifndef _WIN32 #include "dxc/Support/Unicode.h" //===--------------------------- CAllocator -------------------------------===// void *CAllocator::Reallocate(void *p, size_t nBytes) throw() { return realloc(p, nBytes); } void *CAllocator::Allocate(size_t nBytes) throw() { return malloc(nBytes); } void CAllocator::Free(void *p) throw() { free(p); } //===--------------------------- BSTR Allocation --------------------------===// void SysFreeString(BSTR bstrString) { if (bstrString) free((void *)((uintptr_t)bstrString - sizeof(uint32_t))); } // Allocate string with length prefix // https://docs.microsoft.com/en-us/previous-versions/windows/desktop/automat/bstr BSTR SysAllocStringLen(const OLECHAR *strIn, UINT ui) { uint32_t *blobOut = (uint32_t *)malloc(sizeof(uint32_t) + (ui + 1) * sizeof(OLECHAR)); if (!blobOut) return nullptr; // Size in bytes without trailing NULL character blobOut[0] = ui * sizeof(OLECHAR); BSTR strOut = (BSTR)&blobOut[1]; if (strIn) memcpy(strOut, strIn, blobOut[0]); // Write trailing NULL character: strOut[ui] = 0; return strOut; } //===--------------------------- BSTR Length ------------------------------===// unsigned int SysStringLen(const BSTR bstrString) { if (!bstrString) return 0; uint32_t *blobIn = (uint32_t *)((uintptr_t)bstrString - sizeof(uint32_t)); return blobIn[0] / sizeof(OLECHAR); } //===--------------------------- CHandle -------------------------------===// CHandle::CHandle(HANDLE h) { m_h = h; } CHandle::~CHandle() { CloseHandle(m_h); } CHandle::operator HANDLE() const throw() { return m_h; } // CComBSTR CComBSTR::CComBSTR(int nSize, LPCWSTR sz) { if (nSize < 0) { throw std::invalid_argument("CComBSTR must have size >= 0"); } if (nSize == 0) { m_str = NULL; } else { m_str = SysAllocStringLen(sz, nSize); if (!*this) { std::runtime_error("out of memory"); } } } bool CComBSTR::operator==(const CComBSTR &bstrSrc) const throw() { return wcscmp(m_str, bstrSrc.m_str) == 0; } //===--------------------------- WArgV -------------------------------===// WArgV::WArgV(int argc, const char **argv) : WStringVector(argc), WCharPtrVector(argc) { for (int i = 0; i < argc; ++i) { std::string S(argv[i]); const int wideLength = ::MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, S.data(), S.size(), nullptr, 0); assert(wideLength > 0 && "else it should have failed during size calculation"); WStringVector[i].resize(wideLength); ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, S.data(), S.size(), &(WStringVector[i])[0], WStringVector[i].size()); WCharPtrVector[i] = WStringVector[i].data(); } } #endif
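A hypothetical round trip through the BSTR helpers implemented above, meaningful only on the non-Windows path where this adapter is compiled; the literal and length are arbitrary, and the declarations are assumed to come from dxc/Support/WinAdapter.h.

#include <cassert>
#include "dxc/Support/WinAdapter.h"   // BSTR, OLECHAR, SysAllocStringLen, SysStringLen, SysFreeString

void bstrRoundTrip() {
  BSTR b = SysAllocStringLen(L"hello", 5);  // 4-byte length prefix + 6 OLECHARs (incl. trailing NUL)
  assert(b != nullptr);
  assert(SysStringLen(b) == 5);             // length read back from the prefix, not via wcslen
  assert(b[5] == 0);                        // trailing NUL written past the counted characters
  SysFreeString(b);                         // frees the block starting at the length prefix
}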
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/dxcapi.use.cpp
/////////////////////////////////////////////////////////////////////////////// // // // dxcapi.use.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides support for DXC API users. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/WinIncludes.h" #include "dxc/Support/dxcapi.use.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/SharedLibAffix.h" // header generated during DXC build #include "dxc/Support/Unicode.h" #include "dxc/Support/WinFunctions.h" namespace dxc { const char *kDxCompilerLib = CMAKE_SHARED_LIBRARY_PREFIX "dxcompiler" CMAKE_SHARED_LIBRARY_SUFFIX; const char *kDxilLib = CMAKE_SHARED_LIBRARY_PREFIX "dxil" CMAKE_SHARED_LIBRARY_SUFFIX; #ifdef _WIN32 static void TrimEOL(char *pMsg) { char *pEnd = pMsg + strlen(pMsg); --pEnd; while (pEnd > pMsg && (*pEnd == '\r' || *pEnd == '\n')) { --pEnd; } pEnd[1] = '\0'; } static std::string GetWin32ErrorMessage(DWORD err) { char formattedMsg[200]; DWORD formattedMsgLen = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, err, 0, formattedMsg, _countof(formattedMsg), 0); if (formattedMsgLen > 0 && formattedMsgLen < _countof(formattedMsg)) { TrimEOL(formattedMsg); return std::string(formattedMsg); } return std::string(); } #else static std::string GetWin32ErrorMessage(DWORD err) { // Since we use errno for handling messages, we use strerror to get the error // message. return std::string(std::strerror(err)); } #endif // _WIN32 void IFT_Data(HRESULT hr, LPCWSTR data) { if (SUCCEEDED(hr)) return; CW2A pData(data); std::string errMsg; if (HRESULT_IS_WIN32ERR(hr)) { DWORD err = HRESULT_AS_WIN32ERR(hr); errMsg.append(GetWin32ErrorMessage(err)); if (data != nullptr) { errMsg.append(" ", 1); } } if (data != nullptr) { errMsg.append(pData); } throw ::hlsl::Exception(hr, errMsg); } void EnsureEnabled(DxcDllSupport &dxcSupport) { if (!dxcSupport.IsEnabled()) { IFT(dxcSupport.Initialize()); } } void ReadFileIntoBlob(DxcDllSupport &dxcSupport, LPCWSTR pFileName, IDxcBlobEncoding **ppBlobEncoding) { CComPtr<IDxcLibrary> library; IFT(dxcSupport.CreateInstance(CLSID_DxcLibrary, &library)); IFT_Data(library->CreateBlobFromFile(pFileName, nullptr, ppBlobEncoding), pFileName); } void WriteOperationErrorsToConsole(IDxcOperationResult *pResult, bool outputWarnings) { HRESULT status; IFT(pResult->GetStatus(&status)); if (FAILED(status) || outputWarnings) { CComPtr<IDxcBlobEncoding> pErrors; IFT(pResult->GetErrorBuffer(&pErrors)); if (pErrors.p != nullptr) { WriteBlobToConsole(pErrors, STD_ERROR_HANDLE); } } } void WriteOperationResultToConsole(IDxcOperationResult *pRewriteResult, bool outputWarnings) { WriteOperationErrorsToConsole(pRewriteResult, outputWarnings); CComPtr<IDxcBlob> pBlob; IFT(pRewriteResult->GetResult(&pBlob)); WriteBlobToConsole(pBlob, STD_OUTPUT_HANDLE); } static void WriteWideNullTermToConsole(const wchar_t *pText, DWORD streamType) { if (pText == nullptr) { return; } bool lossy; // Note: even if there was loss, print anyway std::string consoleMessage; Unicode::WideToConsoleString(pText, &consoleMessage, &lossy); if (streamType == STD_OUTPUT_HANDLE) { fprintf(stdout, "%s\n", consoleMessage.c_str()); } else if (streamType == STD_ERROR_HANDLE) { fprintf(stderr, "%s\n", consoleMessage.c_str()); } else { throw hlsl::Exception(E_INVALIDARG); } } static HRESULT 
BlobToUtf8IfText(IDxcBlob *pBlob, IDxcBlobUtf8 **ppBlobUtf8) { CComPtr<IDxcBlobEncoding> pBlobEncoding; if (SUCCEEDED(pBlob->QueryInterface(&pBlobEncoding))) { BOOL known; UINT32 cp = 0; IFT(pBlobEncoding->GetEncoding(&known, &cp)); if (known) { return hlsl::DxcGetBlobAsUtf8(pBlob, nullptr, ppBlobUtf8); } } return S_OK; } static HRESULT BlobToWideIfText(IDxcBlob *pBlob, IDxcBlobWide **ppBlobWide) { CComPtr<IDxcBlobEncoding> pBlobEncoding; if (SUCCEEDED(pBlob->QueryInterface(&pBlobEncoding))) { BOOL known; UINT32 cp = 0; IFT(pBlobEncoding->GetEncoding(&known, &cp)); if (known) { return hlsl::DxcGetBlobAsWide(pBlob, nullptr, ppBlobWide); } } return S_OK; } void WriteBlobToConsole(IDxcBlob *pBlob, DWORD streamType) { if (pBlob == nullptr) { return; } // Try to get as UTF-16 or UTF-8 BOOL known; UINT32 cp = 0; CComPtr<IDxcBlobEncoding> pBlobEncoding; IFT(pBlob->QueryInterface(&pBlobEncoding)); IFT(pBlobEncoding->GetEncoding(&known, &cp)); if (cp == DXC_CP_WIDE) { CComPtr<IDxcBlobWide> pWide; IFT(hlsl::DxcGetBlobAsWide(pBlob, nullptr, &pWide)); WriteWideNullTermToConsole(pWide->GetStringPointer(), streamType); } else if (cp == CP_UTF8) { CComPtr<IDxcBlobUtf8> pUtf8; IFT(hlsl::DxcGetBlobAsUtf8(pBlob, nullptr, &pUtf8)); WriteUtf8ToConsoleSizeT(pUtf8->GetStringPointer(), pUtf8->GetStringLength(), streamType); } } void WriteBlobToFile(IDxcBlob *pBlob, LPCWSTR pFileName, UINT32 textCodePage) { if (pBlob == nullptr) { return; } CHandle file(CreateFileW(pFileName, GENERIC_WRITE, FILE_SHARE_READ, nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr)); if (file == INVALID_HANDLE_VALUE) { IFT_Data(HRESULT_FROM_WIN32(GetLastError()), pFileName); } WriteBlobToHandle(pBlob, file, pFileName, textCodePage); } void WriteBlobToHandle(IDxcBlob *pBlob, HANDLE hFile, LPCWSTR pFileName, UINT32 textCodePage) { if (pBlob == nullptr) { return; } LPCVOID pPtr = pBlob->GetBufferPointer(); SIZE_T size = pBlob->GetBufferSize(); std::string BOM; CComPtr<IDxcBlobUtf8> pBlobUtf8; CComPtr<IDxcBlobWide> pBlobWide; if (textCodePage == DXC_CP_UTF8) { IFT_Data(BlobToUtf8IfText(pBlob, &pBlobUtf8), pFileName); if (pBlobUtf8) { pPtr = pBlobUtf8->GetStringPointer(); size = pBlobUtf8->GetStringLength(); // TBD: Should we write UTF-8 BOM? // BOM = "\xef\xbb\xbf"; // UTF-8 } } else if (textCodePage == DXC_CP_WIDE) { IFT_Data(BlobToWideIfText(pBlob, &pBlobWide), pFileName); if (pBlobWide) { pPtr = pBlobWide->GetStringPointer(); size = pBlobWide->GetStringLength() * sizeof(wchar_t); BOM = "\xff\xfe"; // UTF-16 LE } } IFT_Data(size > (SIZE_T)UINT32_MAX ? E_OUTOFMEMORY : S_OK, pFileName); DWORD written; if (!BOM.empty()) { if (FALSE == WriteFile(hFile, BOM.data(), BOM.length(), &written, nullptr)) { IFT_Data(HRESULT_FROM_WIN32(GetLastError()), pFileName); } } if (FALSE == WriteFile(hFile, pPtr, (DWORD)size, &written, nullptr)) { IFT_Data(HRESULT_FROM_WIN32(GetLastError()), pFileName); } } void WriteUtf8ToConsole(const char *pText, int charCount, DWORD streamType) { if (charCount == 0 || pText == nullptr) { return; } std::string resultToPrint; wchar_t *wideMessage = nullptr; size_t wideMessageLen; Unicode::UTF8BufferToWideBuffer(pText, charCount, &wideMessage, &wideMessageLen); WriteWideNullTermToConsole(wideMessage, streamType); delete[] wideMessage; } void WriteUtf8ToConsoleSizeT(const char *pText, size_t charCount, DWORD streamType) { if (charCount == 0) { return; } int charCountInt = 0; IFT(SizeTToInt(charCount, &charCountInt)); WriteUtf8ToConsole(pText, charCountInt, streamType); } } // namespace dxc
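A hypothetical caller of the helpers above, not part of the dump: it loads dxcompiler, reads a source file into a blob, and echoes it to stdout; the file name is a placeholder and failures surface as the hlsl::Exception throws shown above.

#include "dxc/Support/WinIncludes.h"
#include "dxc/Support/dxcapi.use.h"

void dumpSourceFile() {
  dxc::DxcDllSupport support;
  dxc::EnsureEnabled(support);                              // loads the dxcompiler library and binds DxcCreateInstance
  CComPtr<IDxcBlobEncoding> source;
  dxc::ReadFileIntoBlob(support, L"shader.hlsl", &source);  // uses IDxcLibrary::CreateBlobFromFile
  dxc::WriteBlobToConsole(source, STD_OUTPUT_HANDLE);       // converts wide/UTF-8 text as implemented above
}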
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/Global.cpp
/////////////////////////////////////////////////////////////////////////////// // // // Global.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/Global.h" #include <system_error> #include "dxc/Support/WinIncludes.h" void CheckLLVMErrorCode(const std::error_code &ec) { if (ec) { DXASSERT(ec.category() == std::system_category(), "unexpected LLVM exception code"); throw hlsl::Exception(HRESULT_FROM_WIN32(ec.value())); } } static_assert(unsigned(DXC_E_OVERLAPPING_SEMANTICS) == 0x80AA0001, "Sanity check for DXC errors failed");
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/dxcmem.cpp
/////////////////////////////////////////////////////////////////////////////// // // // dxcmem.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides support for a thread-local allocator. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/Global.h" #ifdef _WIN32 #include <specstrings.h> #endif #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "llvm/Support/ThreadLocal.h" #include <memory> static llvm::sys::ThreadLocal<IMalloc> *g_ThreadMallocTls; static IMalloc *g_pDefaultMalloc; HRESULT DxcInitThreadMalloc() throw() { // Allow a default malloc from a previous call to Init. // This will not be cleaned up in the call to Cleanup because // it can still be referenced after Cleanup is called. if (g_pDefaultMalloc) { g_pDefaultMalloc->AddRef(); } else { // We capture the default malloc early to avoid potential failures later on. HRESULT hrMalloc = DxcCoGetMalloc(1, &g_pDefaultMalloc); if (FAILED(hrMalloc)) return hrMalloc; } DXASSERT(g_ThreadMallocTls == nullptr, "else InitThreadMalloc already called"); g_ThreadMallocTls = (llvm::sys::ThreadLocal<IMalloc> *)g_pDefaultMalloc->Alloc( sizeof(llvm::sys::ThreadLocal<IMalloc>)); if (g_ThreadMallocTls == nullptr) { g_pDefaultMalloc->Release(); g_pDefaultMalloc = nullptr; return E_OUTOFMEMORY; } g_ThreadMallocTls = new (g_ThreadMallocTls) llvm::sys::ThreadLocal<IMalloc>; return S_OK; } void DxcCleanupThreadMalloc() throw() { if (g_ThreadMallocTls) { DXASSERT(g_pDefaultMalloc, "else DxcInitThreadMalloc didn't work/fail atomically"); g_ThreadMallocTls->~ThreadLocal(); g_pDefaultMalloc->Free(g_ThreadMallocTls); g_ThreadMallocTls = nullptr; } } IMalloc *DxcGetThreadMallocNoRef() throw() { if (g_ThreadMallocTls == nullptr) { return g_pDefaultMalloc; } return g_ThreadMallocTls->get(); } void DxcClearThreadMalloc() throw() { if (g_ThreadMallocTls != nullptr) { IMalloc *pMalloc = DxcGetThreadMallocNoRef(); g_ThreadMallocTls->erase(); if (pMalloc != nullptr) { pMalloc->Release(); } } } void DxcSetThreadMallocToDefault() throw() { DXASSERT(g_ThreadMallocTls != nullptr, "else prior to DxcInitThreadMalloc or after DxcCleanupThreadMalloc"); DXASSERT(DxcGetThreadMallocNoRef() == nullptr, "else nested allocation invoked"); g_ThreadMallocTls->set(g_pDefaultMalloc); g_pDefaultMalloc->AddRef(); } static IMalloc *DxcSwapThreadMalloc(IMalloc *pMalloc, IMalloc **ppPrior) throw() { DXASSERT(g_ThreadMallocTls != nullptr, "else prior to DxcInitThreadMalloc or after DxcCleanupThreadMalloc"); IMalloc *pPrior = DxcGetThreadMallocNoRef(); if (ppPrior) { *ppPrior = pPrior; } g_ThreadMallocTls->set(pMalloc); return pMalloc; } DxcThreadMalloc::DxcThreadMalloc(IMalloc *pMallocOrNull) throw() { p = DxcSwapThreadMalloc(pMallocOrNull ? pMallocOrNull : g_pDefaultMalloc, &pPrior); } DxcThreadMalloc::~DxcThreadMalloc() { DxcSwapThreadMalloc(pPrior, nullptr); } void *DxcNew(std::size_t size) throw() { void *ptr; IMalloc *iMalloc = DxcGetThreadMallocNoRef(); if (iMalloc != nullptr) { ptr = iMalloc->Alloc(size); } else { // DxcGetThreadMallocNoRef() returning null means the operator is called // before DllMain where the g_pDefaultMalloc is initialized, for example // from CRT libraries when static linking is enabled. 
In that case fallback // to the standard allocator and use CoTaskMemAlloc directly instead of // CoGetMalloc, Alloc & Release for better perf. ptr = CoTaskMemAlloc(size); } return ptr; } void DxcDelete(void *ptr) throw() { IMalloc *iMalloc = DxcGetThreadMallocNoRef(); if (iMalloc != nullptr) { iMalloc->Free(ptr); } else { // DxcGetThreadMallocNoRef() returning null means the operator is called // before DllMain where the g_pDefaultMalloc is initialized, for example // from CRT libraries when static linking is enabled. In that case fallback // to the standard allocator and use CoTaskMemFree directly instead of // CoGetMalloc, Free & Release for better perf. CoTaskMemFree(ptr); } }
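A hypothetical sketch of how the thread-local allocator above is scoped around an API entry point; the function name is illustrative, and the declarations of DxcThreadMalloc, DxcNew and DxcDelete are assumed visible via dxc/Support/Global.h.

#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"

HRESULT DoWorkWithCallerAllocator(IMalloc *pCallerMalloc) {
  // Installs pCallerMalloc (or the default IMalloc when null) for this thread and
  // restores the prior allocator when TM is destroyed (see DxcSwapThreadMalloc above).
  DxcThreadMalloc TM(pCallerMalloc);
  void *p = DxcNew(64);            // routed through DxcGetThreadMallocNoRef()
  if (p == nullptr)
    return E_OUTOFMEMORY;
  DxcDelete(p);
  return S_OK;
}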
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/HLSLOptions.cpp
//===--- HLSLOptions.cpp - Driver Options Table ---------------------------===// /////////////////////////////////////////////////////////////////////////////// // // // HLSLOptions.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/WinIncludes.h" #include "dxc/Support/dxcapi.use.h" #include "dxc/DXIL/DxilShaderModel.h" #include "dxc/DxilContainer/DxilContainer.h" #include "dxc/Support/Global.h" #include "dxc/Support/HLSLOptions.h" #include "dxc/Support/Unicode.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/Option/OptTable.h" #include "llvm/Option/Option.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" using namespace llvm::opt; using namespace dxc; using namespace hlsl; using namespace hlsl::options; #define PREFIX(NAME, VALUE) static const char *const NAME[] = VALUE; #include "dxc/Support/HLSLOptions.inc" #undef PREFIX static const OptTable::Info HlslInfoTable[] = { #define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \ HELPTEXT, METAVAR) \ {PREFIX, NAME, HELPTEXT, METAVAR, OPT_##ID, Option::KIND##Class, \ PARAM, FLAGS, OPT_##GROUP, OPT_##ALIAS, ALIASARGS}, #include "dxc/Support/HLSLOptions.inc" #undef OPTION }; namespace { class HlslOptTable : public OptTable { public: HlslOptTable() : OptTable(HlslInfoTable, llvm::array_lengthof(HlslInfoTable)) {} }; } // namespace static HlslOptTable *g_HlslOptTable; std::error_code hlsl::options::initHlslOptTable() { DXASSERT(g_HlslOptTable == nullptr, "else double-init"); g_HlslOptTable = new (std::nothrow) HlslOptTable(); if (g_HlslOptTable == nullptr) return std::error_code(E_OUTOFMEMORY, std::system_category()); return std::error_code(); } void hlsl::options::cleanupHlslOptTable() { delete g_HlslOptTable; g_HlslOptTable = nullptr; } const OptTable *hlsl::options::getHlslOptTable() { return g_HlslOptTable; } void DxcDefines::push_back(llvm::StringRef value) { // Skip empty defines. if (value.size() > 0) { DefineStrings.push_back(value); } } UINT32 DxcDefines::ComputeNumberOfWCharsNeededForDefines() { UINT32 wcharSize = 0; for (llvm::StringRef &S : DefineStrings) { DXASSERT(S.size() > 0, "else DxcDefines::push_back should not have added this"); const int wideLength = ::MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, S.data(), S.size(), nullptr, 0); IFTARG(wideLength != 0); wcharSize += wideLength + 1; // adding null terminated character } return wcharSize; } void DxcDefines::BuildDefines() { // Calculate and prepare the size of the backing buffer. DXASSERT(DefineValues == nullptr, "else DxcDefines is already built"); UINT32 wcharSize = ComputeNumberOfWCharsNeededForDefines(); DefineValues = new wchar_t[wcharSize]; DefineVector.resize(DefineStrings.size()); // Build up the define structures while filling in the backing buffer. 
UINT32 remaining = wcharSize; LPWSTR pWriteCursor = DefineValues; for (size_t i = 0; i < DefineStrings.size(); ++i) { llvm::StringRef &S = DefineStrings[i]; DxcDefine &D = DefineVector[i]; const int wideLength = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, S.data(), S.size(), pWriteCursor, remaining); DXASSERT(wideLength > 0, "else it should have failed during size calculation"); LPWSTR pDefineEnd = pWriteCursor + wideLength; D.Name = pWriteCursor; LPWSTR pEquals = std::find(pWriteCursor, pDefineEnd, L'='); if (pEquals == pDefineEnd) { D.Value = nullptr; } else { *pEquals = L'\0'; D.Value = pEquals + 1; } // Advance past converted characters and include the null terminator. pWriteCursor += wideLength; *pWriteCursor = L'\0'; ++pWriteCursor; DXASSERT(pWriteCursor <= DefineValues + wcharSize, "else this function is calculating this incorrectly"); remaining -= (wideLength + 1); } } bool DxcOpts::IsRootSignatureProfile() const { return TargetProfile == "rootsig_1_0" || TargetProfile == "rootsig_1_1"; } bool DxcOpts::IsLibraryProfile() const { return TargetProfile.startswith("lib_"); } bool DxcOpts::GenerateFullDebugInfo() const { return DebugInfo; } bool DxcOpts::GeneratePDB() const { return DebugInfo || SourceOnlyDebug; } bool DxcOpts::EmbedDebugInfo() const { return EmbedDebug; } bool DxcOpts::EmbedPDBName() const { return GeneratePDB() || !DebugFile.empty(); } bool DxcOpts::DebugFileIsDirectory() const { return !DebugFile.empty() && llvm::sys::path::is_separator(DebugFile[DebugFile.size() - 1]); } llvm::StringRef DxcOpts::GetPDBName() const { if (!DebugFileIsDirectory()) return DebugFile; return llvm::StringRef(); } MainArgs::MainArgs(int argc, const wchar_t **argv, int skipArgCount) { if (argc > skipArgCount) { Utf8StringVector.reserve(argc - skipArgCount); Utf8CharPtrVector.reserve(argc - skipArgCount); for (int i = skipArgCount; i < argc; ++i) { Utf8StringVector.emplace_back(Unicode::WideToUTF8StringOrThrow(argv[i])); Utf8CharPtrVector.push_back(Utf8StringVector.back().data()); } } } MainArgs::MainArgs(int argc, const char **argv, int skipArgCount) { if (argc > skipArgCount) { Utf8StringVector.reserve(argc - skipArgCount); Utf8CharPtrVector.reserve(argc - skipArgCount); for (int i = skipArgCount; i < argc; ++i) { Utf8StringVector.emplace_back(argv[i]); Utf8CharPtrVector.push_back(Utf8StringVector.back().data()); } } } MainArgs::MainArgs(llvm::ArrayRef<llvm::StringRef> args) { Utf8StringVector.reserve(args.size()); Utf8CharPtrVector.reserve(args.size()); for (llvm::StringRef str : args) { Utf8StringVector.emplace_back(str.str()); Utf8CharPtrVector.push_back(Utf8StringVector.back().data()); } } MainArgs &MainArgs::operator=(const MainArgs &other) { Utf8StringVector.clear(); Utf8CharPtrVector.clear(); Utf8StringVector.reserve(other.Utf8StringVector.size()); Utf8CharPtrVector.reserve(other.Utf8StringVector.size()); for (const std::string &str : other.Utf8StringVector) { Utf8StringVector.emplace_back(str); Utf8CharPtrVector.push_back(Utf8StringVector.back().data()); } return *this; } StringRefWide::StringRefWide(llvm::StringRef value) { if (!value.empty()) m_value = Unicode::UTF8ToWideStringOrThrow(value.data()); } static bool GetTargetVersionFromString(llvm::StringRef ref, unsigned *major, unsigned *minor) { *major = *minor = -1; unsigned len = ref.size(); if (len < 6 || len > 11) // length: ps_6_0 to rootsig_1_0 return false; if (ref[len - 4] != '_' || ref[len - 2] != '_') return false; char cMajor = ref[len - 3]; char cMinor = ref[len - 1]; if (cMajor >= '0' && cMajor <= '9') *major 
= cMajor - '0'; else return false; if (cMinor == 'x') *minor = 0xF; else if (cMinor >= '0' && cMinor <= '9') *minor = cMinor - '0'; else return false; return true; } // Copied from CompilerInvocation since we parse our own diagnostic arguments static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group, OptSpecifier GroupWithValue, std::vector<std::string> &Diagnostics) { for (Arg *A : Args.filtered(Group)) { if (A->getOption().getKind() == Option::FlagClass) { // The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add // its name (minus the "W" or "R" at the beginning) to the warning list. Diagnostics.push_back(A->getOption().getName().drop_front(1)); } else if (A->getOption().matches(GroupWithValue)) { // This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic // group. Diagnostics.push_back(A->getOption().getName().drop_front(1).rtrim("=-")); } else { // Otherwise, add its value (for OPT_W_Joined and similar). for (const char *Arg : A->getValues()) Diagnostics.emplace_back(Arg); } } } static std::pair<std::string, std::string> ParseDefine(std::string &argVal) { std::pair<std::string, std::string> result = std::make_pair("", ""); if (argVal.empty()) return result; auto defEndPos = argVal.find('=') == std::string::npos ? argVal.size() : argVal.find('='); result.first = argVal.substr(0, defEndPos); if (!result.first.empty() && defEndPos < argVal.size() - 1) { result.second = argVal.substr(defEndPos + 1, argVal.size() - defEndPos - 1); } return result; } // SPIRV Change Starts #ifdef ENABLE_SPIRV_CODEGEN /// Checks and collects the arguments for -fvk-{b|s|t|u}-shift into *shifts. static bool handleVkShiftArgs(const InputArgList &args, OptSpecifier id, const char *name, llvm::SmallVectorImpl<int32_t> *shifts, llvm::raw_ostream &errors) { const auto values = args.getAllArgValues(id); if (values.empty()) return true; if (!args.hasArg(OPT_spirv)) { errors << "-fvk-" << name << "-shift requires -spirv"; return false; } if (!args.getLastArgValue(OPT_fvk_bind_register).empty()) { errors << "-fvk-" << name << "-shift cannot be used together with -fvk-bind-register"; return false; } shifts->clear(); bool setForAll = false; for (const auto &val : values) { int32_t number = 0; if (val == "all") { number = -1; setForAll = true; } else { if (llvm::StringRef(val).getAsInteger(10, number)) { errors << "invalid -fvk-" << name << "-shift argument: " << val; return false; } if (number < 0) { errors << "negative -fvk-" << name << "-shift argument: " << val; return false; } } shifts->push_back(number); } if (setForAll && shifts->size() > 2) { errors << "setting all sets via -fvk-" << name << "-shift argument should be used alone"; return false; } return true; } // Check if any options that are unsupported with SPIR-V are used. static bool hasUnsupportedSpirvOption(const InputArgList &args, llvm::raw_ostream &errors) { // Note: The options checked here are non-exhaustive. A thorough audit of // available options and their current compatibility is needed to generate a // complete list. 
std::vector<OptSpecifier> unsupportedOpts = {OPT_Fd, OPT_Fre, OPT_Gec, OPT_Qstrip_reflect}; for (const auto &id : unsupportedOpts) { if (Arg *arg = args.getLastArg(id)) { errors << "-" << arg->getOption().getName() << " is not supported with -spirv"; return true; } } return false; } namespace { /// Maximum size of OpString instruction minus two operands static const uint32_t kDefaultMaximumSourceLength = 0xFFFDu; static const uint32_t kTestingMaximumSourceLength = 13u; } // namespace #endif // ENABLE_SPIRV_CODEGEN // SPIRV Change Ends namespace hlsl { LangStd parseHLSLVersion(llvm::StringRef Ver) { return llvm::StringSwitch<hlsl::LangStd>(Ver) .Case("2015", hlsl::LangStd::v2015) .Case("2016", hlsl::LangStd::v2016) .Case("2017", hlsl::LangStd::v2017) .Case("2018", hlsl::LangStd::v2018) .Case("2021", hlsl::LangStd::v2021) .Case("202x", hlsl::LangStd::v202x) .Default(hlsl::LangStd::vError); } namespace options { /// Reads all options from the given argument strings, populates opts, and /// validates reporting errors and warnings. int ReadDxcOpts(const OptTable *optionTable, unsigned flagsToInclude, const MainArgs &argStrings, DxcOpts &opts, llvm::raw_ostream &errors) { DXASSERT_NOMSG(optionTable != nullptr); opts.DefaultTextCodePage = DXC_CP_UTF8; unsigned missingArgIndex = 0, missingArgCount = 0; InputArgList Args = optionTable->ParseArgs(argStrings.getArrayRef(), missingArgIndex, missingArgCount, flagsToInclude); // Set DefaultTextCodePage early so it may influence error buffer // Default to UTF8 for compatibility llvm::StringRef encoding = Args.getLastArgValue(OPT_encoding); if (!encoding.empty()) { if (encoding.equals_lower("utf8")) { opts.DefaultTextCodePage = DXC_CP_UTF8; #ifdef _WIN32 } else if (encoding.equals_lower("utf16")) { opts.DefaultTextCodePage = DXC_CP_UTF16; // Only on Windows #else } else if (encoding.equals_lower("utf32")) { opts.DefaultTextCodePage = DXC_CP_UTF32; // Only on *nix #endif } else if (encoding.equals_lower("wide")) { opts.DefaultTextCodePage = DXC_CP_WIDE; } else { errors << "Unsupported value '" << encoding << "for -encoding option. Allowed values: wide, utf8, " #ifdef _WIN32 "utf16."; #else "utf32."; #endif return 1; } } // Verify consistency for external library support. opts.ExternalLib = Args.getLastArgValue(OPT_external_lib); opts.ExternalFn = Args.getLastArgValue(OPT_external_fn); if (opts.ExternalLib.empty()) { if (!opts.ExternalFn.empty()) { errors << "External function cannot be specified without an external " "library name."; return 1; } } else { if (opts.ExternalFn.empty()) { errors << "External library name requires specifying an external " "function name."; return 1; } } opts.ShowHelp = Args.hasFlag(OPT_help, OPT_INVALID, false); opts.ShowHelp |= (opts.ShowHelpHidden = Args.hasFlag(OPT__help_hidden, OPT_INVALID, false)); if (opts.ShowHelp) { return 0; } opts.ShowVersion = Args.hasFlag(OPT__version, OPT_INVALID, false); if (opts.ShowVersion) { return 0; } if (missingArgCount) { errors << "Argument to '" << Args.getArgString(missingArgIndex) << "' is missing."; return 1; } if (!Args.hasArg(hlsl::options::OPT_Qunused_arguments)) { for (const Arg *A : Args.filtered(OPT_UNKNOWN)) { errors << "Unknown argument: '" << A->getAsString(Args).c_str() << "'"; return 1; } } // Add macros from the command line. for (const Arg *A : Args.filtered(OPT_D)) { opts.Defines.push_back(A->getValue()); // If supporting OPT_U and included in filter, handle undefs. 
} opts.Defines .BuildDefines(); // Must be called after all defines are pushed back DXASSERT(opts.ExternalLib.empty() == opts.ExternalFn.empty(), "else flow above is incorrect"); opts.PreciseOutputs = Args.getAllArgValues(OPT_precise_output); // when no-warnings option is present, do not output warnings. opts.OutputWarnings = Args.hasFlag(OPT_INVALID, OPT_no_warnings, true); opts.EntryPoint = Args.getLastArgValue(OPT_entrypoint); // Entry point is required in arguments only for drivers; APIs take this // through an argument. The value should default to 'main', but we let the // caller apply this policy. if (opts.TargetProfile.empty()) { opts.TargetProfile = Args.getLastArgValue(OPT_target_profile); } if (opts.IsLibraryProfile()) { // Don't bother erroring out when entry is specified. We weren't always // doing this before, so doing so will break existing code. // Set entry point to impossible name. opts.EntryPoint = "lib.no::entry"; } else { if (Args.getLastArg(OPT_exports)) { errors << "library profile required when using -exports option"; return 1; } else if (Args.hasFlag(OPT_export_shaders_only, OPT_INVALID, false)) { errors << "library profile required when using -export-shaders-only option"; return 1; } else if (Args.getLastArg(OPT_default_linkage)) { errors << "library profile required when using -default-linkage option"; return 1; } } opts.EnableDX9CompatMode = Args.hasFlag(OPT_Gec, OPT_INVALID, false); llvm::StringRef ver = Args.getLastArgValue(OPT_hlsl_version); if (ver.empty()) { if (opts.EnableDX9CompatMode) opts.HLSLVersion = hlsl::LangStd::v2016; // Default to max supported // version with /Gec flag else opts.HLSLVersion = hlsl::LangStd::vLatest; // Default to latest version } else { opts.HLSLVersion = parseHLSLVersion(ver); if (opts.HLSLVersion == hlsl::LangStd::vError) { errors << "Unknown HLSL version: " << ver << ". 
Valid versions: " << hlsl::ValidVersionsStr; return 1; } } if (opts.HLSLVersion == hlsl::LangStd::v2015 && !(flagsToInclude & HlslFlags::ISenseOption)) { errors << "HLSL Version 2015 is only supported for language services"; return 1; } if (opts.EnableDX9CompatMode && opts.HLSLVersion > hlsl::LangStd::v2016) { errors << "/Gec is not supported with HLSLVersion " << (unsigned long)opts.HLSLVersion; return 1; } if (opts.HLSLVersion <= hlsl::LangStd::v2016) { opts.EnableFXCCompatMode = true; } // AssemblyCodeHex not supported (Fx) // OutputLibrary not supported (Fl) opts.AssemblyCode = Args.getLastArgValue(OPT_Fc); opts.DebugFile = Args.getLastArgValue(OPT_Fd); opts.ImportBindingTable = Args.getLastArgValue(OPT_import_binding_table); opts.BindingTableDefine = Args.getLastArgValue(OPT_binding_table_define); opts.ExtractPrivateFile = Args.getLastArgValue(OPT_getprivate); opts.Enable16BitTypes = Args.hasFlag(OPT_enable_16bit_types, OPT_INVALID, false); opts.OutputObject = Args.getLastArgValue(OPT_Fo); opts.OutputHeader = Args.getLastArgValue(OPT_Fh); opts.OutputWarningsFile = Args.getLastArgValue(OPT_Fe); opts.OutputReflectionFile = Args.getLastArgValue(OPT_Fre); opts.OutputRootSigFile = Args.getLastArgValue(OPT_Frs); opts.OutputShaderHashFile = Args.getLastArgValue(OPT_Fsh); opts.DiagnosticsFormat = Args.getLastArgValue(OPT_fdiagnostics_format_EQ, "clang"); opts.ShowOptionNames = Args.hasFlag(OPT_fdiagnostics_show_option, OPT_fno_diagnostics_show_option, true); opts.UseColor = Args.hasFlag(OPT_Cc, OPT_INVALID, false); opts.UseInstructionNumbers = Args.hasFlag(OPT_Ni, OPT_INVALID, false); opts.UseInstructionByteOffsets = Args.hasFlag(OPT_No, OPT_INVALID, false); opts.UseHexLiterals = Args.hasFlag(OPT_Lx, OPT_INVALID, false); if (Args.hasFlag(OPT_P, OPT_INVALID, false)) { // Default preprocess filename is InputName.i. llvm::SmallString<128> Path(Args.getLastArgValue(OPT_INPUT)); llvm::sys::path::replace_extension(Path, "i"); // Try to get preprocess filename from Fi. opts.Preprocess = Args.getLastArgValue(OPT_Fi, Path).str(); // Hack to support fxc style /P preprocess_filename. // When there're more than 1 Input file, use the input which is after /P as // preprocess. if (!Args.hasArg(OPT_Fi)) { std::vector<std::string> Inputs = Args.getAllArgValues(OPT_INPUT); if (Inputs.size() > 1) { llvm::opt::Arg *PArg = Args.getLastArg(OPT_P); std::string LastInput = Inputs.back(); llvm::opt::Arg *PrevInputArg = nullptr; for (llvm::opt::Arg *InputArg : Args.filtered(OPT_INPUT)) { // Find Input after /P. if ((PArg->getIndex() + 1) == InputArg->getIndex()) { opts.Preprocess = InputArg->getValue(); if (LastInput == opts.Preprocess && PrevInputArg) { // When InputArg is last Input, update it to other Input so // Args.getLastArgValue(OPT_INPUT) get expect Input. InputArg->getValues()[0] = PrevInputArg->getValues()[0]; } errors << "warning: -P " << opts.Preprocess << " is deprecated, please use -P -Fi " << opts.Preprocess << " instead.\n"; break; } PrevInputArg = InputArg; } } } } opts.AstDumpImplicit = Args.hasFlag(OPT_ast_dump_implicit, OPT_INVALID, false); // -ast-dump-implicit should imply -ast-dump. 
opts.AstDump = Args.hasFlag(OPT_ast_dump, OPT_INVALID, false) || opts.AstDumpImplicit; opts.WriteDependencies = Args.hasFlag(OPT_write_dependencies, OPT_INVALID, false); opts.OutputFileForDependencies = Args.getLastArgValue(OPT_write_dependencies_to); opts.DumpDependencies = Args.hasFlag(OPT_dump_dependencies, OPT_INVALID, false) || opts.WriteDependencies || !opts.OutputFileForDependencies.empty(); opts.CodeGenHighLevel = Args.hasFlag(OPT_fcgl, OPT_INVALID, false); opts.AllowPreserveValues = Args.hasFlag(OPT_preserve_intermediate_values, OPT_INVALID, false); opts.DebugInfo = Args.hasFlag(OPT__SLASH_Zi, OPT_INVALID, false); opts.DebugNameForBinary = Args.hasFlag(OPT_Zsb, OPT_INVALID, false); opts.DebugNameForSource = Args.hasFlag(OPT_Zss, OPT_INVALID, false); opts.VariableName = Args.getLastArgValue(OPT_Vn); opts.InputFile = Args.getLastArgValue(OPT_INPUT); opts.ForceRootSigVer = Args.getLastArgValue(OPT_force_rootsig_ver); if (opts.ForceRootSigVer.empty()) opts.ForceRootSigVer = Args.getLastArgValue(OPT_force_rootsig_ver_); opts.PrivateSource = Args.getLastArgValue(OPT_setprivate); opts.RootSignatureSource = Args.getLastArgValue(OPT_setrootsignature); opts.VerifyRootSignatureSource = Args.getLastArgValue(OPT_verifyrootsignature); opts.RootSignatureDefine = Args.getLastArgValue(OPT_rootsig_define); opts.ScanLimit = 0; llvm::StringRef limit = Args.getLastArgValue(OPT_memdep_block_scan_limit); if (!limit.empty()) opts.ScanLimit = std::stoul(std::string(limit)); for (std::string opt : Args.getAllArgValues(OPT_opt_disable)) opts.OptToggles.Toggles[llvm::StringRef(opt).lower()] = false; for (std::string opt : Args.getAllArgValues(OPT_opt_enable)) { std::string optimization = llvm::StringRef(opt).lower(); if (opts.OptToggles.Toggles.count(optimization) && !opts.OptToggles.Toggles[optimization]) { errors << "Contradictory use of -opt-disable and -opt-enable with \"" << llvm::StringRef(opt).lower() << "\""; return 1; } opts.OptToggles.Toggles[optimization] = true; } std::vector<std::string> ignoreSemDefs = Args.getAllArgValues(OPT_ignore_semdef); for (std::string &ignoreSemDef : ignoreSemDefs) { opts.IgnoreSemDefs.insert(ignoreSemDef); } std::vector<std::string> overrideSemDefs = Args.getAllArgValues(OPT_override_semdef); for (std::string &overrideSemDef : overrideSemDefs) { auto kv = ParseDefine(overrideSemDef); if (kv.first.empty()) continue; if (opts.OverrideSemDefs.find(kv.first) == opts.OverrideSemDefs.end()) { opts.OverrideSemDefs.insert(std::make_pair(kv.first, kv.second)); } else { opts.OverrideSemDefs[kv.first] = kv.second; } } std::vector<std::string> optSelects = Args.getAllArgValues(OPT_opt_select); for (unsigned i = 0; i + 1 < optSelects.size(); i += 2) { std::string optimization = llvm::StringRef(optSelects[i]).lower(); std::string selection = optSelects[i + 1]; if (opts.OptToggles.Selects.count(optimization) && selection.compare(opts.OptToggles.Selects[optimization])) { errors << "Contradictory -opt-selects for \"" << optimization << "\""; return 1; } opts.OptToggles.Selects[optimization] = selection; } if (!opts.ForceRootSigVer.empty() && opts.ForceRootSigVer != "rootsig_1_0" && opts.ForceRootSigVer != "rootsig_1_1") { errors << "Unsupported value '" << opts.ForceRootSigVer << "' for root signature profile."; return 1; } opts.IEEEStrict = Args.hasFlag(OPT_Gis, OPT_INVALID, false); opts.IgnoreLineDirectives = Args.hasFlag(OPT_ignore_line_directives, OPT_INVALID, false); opts.FloatDenormalMode = Args.getLastArgValue(OPT_denorm); // Check if a given denormalized value is valid if 
(!opts.FloatDenormalMode.empty()) { if (!(opts.FloatDenormalMode.equals_lower("preserve") || opts.FloatDenormalMode.equals_lower("ftz") || opts.FloatDenormalMode.equals_lower("any"))) { errors << "Unsupported value '" << opts.FloatDenormalMode << "' for denorm option."; return 1; } } llvm::StringRef auto_binding_space = Args.getLastArgValue(OPT_auto_binding_space); if (!auto_binding_space.empty()) { if (auto_binding_space.getAsInteger(10, opts.AutoBindingSpace)) { errors << "Unsupported value '" << auto_binding_space << "' for auto binding space."; return 1; } } opts.Exports = Args.getAllArgValues(OPT_exports); opts.DefaultLinkage = Args.getLastArgValue(OPT_default_linkage); if (!opts.DefaultLinkage.empty()) { if (!(opts.DefaultLinkage.equals_lower("internal") || opts.DefaultLinkage.equals_lower("external"))) { errors << "Unsupported value '" << opts.DefaultLinkage << "for -default-linkage option."; return 1; } } // Check options only allowed in shader model >= 6.2FPDenormalMode unsigned Major = 0; unsigned Minor = 0; if (!opts.TargetProfile.empty()) { if (!GetTargetVersionFromString(opts.TargetProfile, &Major, &Minor)) { errors << "unable to parse shader model."; return 1; } } if (opts.TargetProfile.empty() || Major < 6 || (Major == 6 && Minor < 2)) { if (!opts.FloatDenormalMode.empty()) { errors << "denorm option is only allowed for shader model 6.2 and above."; return 1; } } // /enable-16bit-types only allowed for HLSL 2018 and shader model 6.2 if (opts.Enable16BitTypes) { if (opts.TargetProfile.empty() || opts.HLSLVersion < hlsl::LangStd::v2018 || Major < 6 || (Major == 6 && Minor < 2)) { errors << "enable-16bit-types is only allowed for shader model >= 6.2 " "and HLSL Language >= 2018."; return 1; } } opts.DisableOptimizations = false; if (Arg *A = Args.getLastArg(OPT_O0, OPT_O1, OPT_O2, OPT_O3, OPT_Od)) { if (A->getOption().matches(OPT_O0)) opts.OptLevel = 0; if (A->getOption().matches(OPT_O1)) opts.OptLevel = 1; if (A->getOption().matches(OPT_O2)) opts.OptLevel = 2; if (A->getOption().matches(OPT_O3)) opts.OptLevel = 3; if (A->getOption().matches(OPT_Od)) { opts.DisableOptimizations = true; opts.OptLevel = 0; } } else opts.OptLevel = 3; opts.OptDump = Args.hasFlag(OPT_Odump, OPT_INVALID, false); opts.DisableValidation = Args.hasFlag(OPT_VD, OPT_INVALID, false); opts.AllResourcesBound = Args.hasFlag(OPT_all_resources_bound, OPT_INVALID, false); opts.AllResourcesBound = Args.hasFlag(OPT_all_resources_bound_, OPT_INVALID, opts.AllResourcesBound); opts.IgnoreOptSemDefs = Args.hasFlag(OPT_ignore_opt_semdefs, OPT_INVALID, false); opts.ColorCodeAssembly = Args.hasFlag(OPT_Cc, OPT_INVALID, false); opts.DefaultRowMajor = Args.hasFlag(OPT_Zpr, OPT_INVALID, false); opts.DefaultColMajor = Args.hasFlag(OPT_Zpc, OPT_INVALID, false); opts.DumpBin = Args.hasFlag(OPT_dumpbin, OPT_INVALID, false); opts.Link = Args.hasFlag(OPT_link, OPT_INVALID, false); bool NotUseLegacyCBufLoad = Args.hasFlag(OPT_no_legacy_cbuf_layout, OPT_INVALID, false); NotUseLegacyCBufLoad = Args.hasFlag(OPT_not_use_legacy_cbuf_load_, OPT_INVALID, NotUseLegacyCBufLoad); if (NotUseLegacyCBufLoad) errors << "warning: -no-legacy-cbuf-layout" << " is no longer supported and will be ignored." 
<< " Future releases will not recognize it.\n"; opts.PackPrefixStable = Args.hasFlag(OPT_pack_prefix_stable, OPT_INVALID, false); opts.PackPrefixStable = Args.hasFlag(OPT_pack_prefix_stable_, OPT_INVALID, opts.PackPrefixStable); opts.PackOptimized = Args.hasFlag(OPT_pack_optimized, OPT_INVALID, false); opts.PackOptimized = Args.hasFlag(OPT_pack_optimized_, OPT_INVALID, opts.PackOptimized); opts.DisplayIncludeProcess = Args.hasFlag(OPT_H, OPT_INVALID, false); opts.WarningAsError = Args.hasFlag(OPT__SLASH_WX, OPT_INVALID, false); opts.AvoidFlowControl = Args.hasFlag(OPT_Gfa, OPT_INVALID, false); opts.PreferFlowControl = Args.hasFlag(OPT_Gfp, OPT_INVALID, false); opts.RecompileFromBinary = Args.hasFlag(OPT_recompile, OPT_INVALID, false); opts.StripDebug = Args.hasFlag(OPT_Qstrip_debug, OPT_INVALID, false); opts.EmbedDebug = Args.hasFlag(OPT_Qembed_debug, OPT_INVALID, false); opts.SourceInDebugModule = Args.hasFlag(OPT_Qsource_in_debug_module, OPT_INVALID, false); opts.SourceOnlyDebug = Args.hasFlag(OPT_Zs, OPT_INVALID, false); opts.PdbInPrivate = Args.hasFlag(OPT_Qpdb_in_private, OPT_INVALID, false); opts.StripRootSignature = Args.hasFlag(OPT_Qstrip_rootsignature, OPT_INVALID, false); opts.StripPrivate = Args.hasFlag(OPT_Qstrip_priv, OPT_INVALID, false); opts.StripReflection = Args.hasFlag(OPT_Qstrip_reflect, OPT_INVALID, false); opts.KeepReflectionInDxil = Args.hasFlag(OPT_Qkeep_reflect_in_dxil, OPT_INVALID, false); opts.StripReflectionFromDxil = Args.hasFlag(OPT_Qstrip_reflect_from_dxil, OPT_INVALID, false); opts.ExtractRootSignature = Args.hasFlag(OPT_extractrootsignature, OPT_INVALID, false); opts.DisassembleColorCoded = Args.hasFlag(OPT_Cc, OPT_INVALID, false); opts.DisassembleInstNumbers = Args.hasFlag(OPT_Ni, OPT_INVALID, false); opts.DisassembleByteOffset = Args.hasFlag(OPT_No, OPT_INVALID, false); opts.DisaseembleHex = Args.hasFlag(OPT_Lx, OPT_INVALID, false); opts.LegacyMacroExpansion = Args.hasFlag(OPT_flegacy_macro_expansion, OPT_INVALID, false); opts.LegacyResourceReservation = Args.hasFlag(OPT_flegacy_resource_reservation, OPT_INVALID, false); opts.ExportShadersOnly = Args.hasFlag(OPT_export_shaders_only, OPT_INVALID, false); opts.PrintBeforeAll = Args.hasFlag(OPT_print_before_all, OPT_INVALID, false); opts.PrintAfterAll = Args.hasFlag(OPT_print_after_all, OPT_INVALID, false); opts.ResMayAlias = Args.hasFlag(OPT_res_may_alias, OPT_INVALID, false); opts.ResMayAlias = Args.hasFlag(OPT_res_may_alias_, OPT_INVALID, opts.ResMayAlias); opts.ForceZeroStoreLifetimes = Args.hasFlag(OPT_force_zero_store_lifetimes, OPT_INVALID, false); // Lifetime markers on by default in 6.6 unless disabled explicitly opts.EnableLifetimeMarkers = Args.hasFlag(OPT_enable_lifetime_markers, OPT_disable_lifetime_markers, DXIL::CompareVersions(Major, Minor, 6, 6) >= 0); opts.ForceDisableLocTracking = Args.hasFlag(OPT_fdisable_loc_tracking, OPT_INVALID, false); opts.NewInlining = Args.hasFlag(OPT_fnew_inlining_behavior, OPT_INVALID, false); opts.TimeReport = Args.hasFlag(OPT_ftime_report, OPT_INVALID, false); opts.TimeTrace = Args.hasFlag(OPT_ftime_trace, OPT_INVALID, false) ? 
"-" : ""; opts.VerifyDiagnostics = Args.hasFlag(OPT_verify, OPT_INVALID, false); if (Args.hasArg(OPT_ftime_trace_EQ)) opts.TimeTrace = Args.getLastArgValue(OPT_ftime_trace_EQ); if (Arg *A = Args.getLastArg(OPT_ftime_trace_granularity_EQ)) { if (llvm::StringRef(A->getValue()) .getAsInteger(10, opts.TimeTraceGranularity)) { opts.TimeTraceGranularity = 500; errors << "Warning: Invalid value for -ftime-trace-granularity option " "specified, defaulting to " << opts.TimeTraceGranularity << " microseconds."; } } opts.EnablePayloadQualifiers = Args.hasFlag(OPT_enable_payload_qualifiers, OPT_INVALID, DXIL::CompareVersions(Major, Minor, 6, 7) >= 0); for (const std::string &value : Args.getAllArgValues(OPT_print_before)) { opts.PrintBefore.insert(value); } for (const std::string &value : Args.getAllArgValues(OPT_print_after)) { opts.PrintAfter.insert(value); } opts.EnablePayloadQualifiers &= !Args.hasFlag(OPT_disable_payload_qualifiers, OPT_INVALID, false); if (opts.EnablePayloadQualifiers && DXIL::CompareVersions(Major, Minor, 6, 6) < 0) { errors << "Invalid target for payload access qualifiers. Only lib_6_6 and " "beyond are supported."; return 1; } opts.HandleExceptions = !Args.hasFlag(OPT_disable_exception_handling, OPT_INVALID, false); if (opts.DefaultColMajor && opts.DefaultRowMajor) { errors << "Cannot specify /Zpr and /Zpc together, use /? to get usage " "information"; return 1; } if (opts.AvoidFlowControl && opts.PreferFlowControl) { errors << "Cannot specify /Gfa and /Gfp together, use /? to get usage " "information"; return 1; } if (opts.PackPrefixStable && opts.PackOptimized) { errors << "Cannot specify /pack_prefix_stable and /pack_optimized " "together, use /? to get usage information"; return 1; } // TODO: more fxc option check. // ERR_RES_MAY_ALIAS_ONLY_IN_CS_5 // ERR_NOT_ABLE_TO_FLATTEN on if that contain side effects // TODO: other front-end error. // ERR_RESOURCE_NOT_IN_TEMPLATE // ERR_COMPLEX_TEMPLATE_RESOURCE // ERR_RESOURCE_BIND_CONFLICT // ERR_TEMPLATE_VAR_CONFLICT // ERR_ATTRIBUTE_PARAM_SIDE_EFFECT if (opts.StripPrivate && !opts.PrivateSource.empty()) { errors << "Cannot specify /Qstrip_priv and /setprivate together."; return 1; } if (opts.PdbInPrivate && !opts.PrivateSource.empty()) { errors << "Cannot specify /Qpdb_in_private and /setprivate together."; return 1; } if (opts.StripPrivate && opts.PdbInPrivate) { errors << "Cannot specify /Qstrip_priv and /Qpdb_in_private together."; return 1; } if ((flagsToInclude & hlsl::options::DriverOption) && opts.InputFile.empty()) { // Input file is required in arguments only for drivers; APIs take this // through an argument. errors << "Required input file argument is missing. 
use -help to get more " "information."; return 1; } if (opts.OutputHeader.empty() && !opts.VariableName.empty()) { errors << "Cannot specify a header variable name when not writing a header."; return 1; } if (!opts.Preprocess.empty() && (!opts.OutputHeader.empty() || !opts.OutputObject.empty() || !opts.OutputWarnings || !opts.OutputWarningsFile.empty() || !opts.OutputReflectionFile.empty() || !opts.OutputRootSigFile.empty() || !opts.OutputShaderHashFile.empty())) { opts.OutputHeader = ""; opts.OutputObject = ""; opts.OutputWarnings = true; opts.OutputWarningsFile = ""; opts.OutputReflectionFile = ""; opts.OutputRootSigFile = ""; opts.OutputShaderHashFile = ""; errors << "Warning: compiler options ignored with Preprocess."; } if (opts.DumpBin) { if (opts.DisplayIncludeProcess || opts.AstDump || opts.DumpDependencies) { errors << "Cannot perform actions related to sources from a binary file."; return 1; } if (opts.AllResourcesBound || opts.AvoidFlowControl || opts.CodeGenHighLevel || opts.DebugInfo || opts.DefaultColMajor || opts.DefaultRowMajor || opts.Defines.size() != 0 || opts.DisableOptimizations || !opts.EntryPoint.empty() || !opts.ForceRootSigVer.empty() || opts.PreferFlowControl || !opts.TargetProfile.empty()) { errors << "Cannot specify compilation options when reading a binary file."; return 1; } } // XXX TODO: Sort this out, since it's required for new API, but a separate // argument for old APIs. if ((flagsToInclude & hlsl::options::DriverOption) && !(flagsToInclude & hlsl::options::RewriteOption) && opts.TargetProfile.empty() && !opts.DumpBin && opts.Preprocess.empty() && !opts.RecompileFromBinary) { // Target profile is required in arguments only for drivers when compiling; // APIs take this through an argument. errors << "Target profile argument is missing"; return 1; } llvm::StringRef valVersionStr = Args.getLastArgValue(OPT_validator_version); if (!valVersionStr.empty()) { // Parse "major.minor" version string auto verPair = valVersionStr.split("."); llvm::APInt major, minor; if (verPair.first.getAsInteger(0, major) || verPair.second.getAsInteger(0, minor)) { errors << "Format of validator version is \"<major>.<minor>\" (ex: " "\"1.4\")."; return 1; } uint64_t major64 = major.getLimitedValue(); uint64_t minor64 = minor.getLimitedValue(); if (major64 > DXIL::kDxilMajor || (major64 == DXIL::kDxilMajor && minor64 > DXIL::kDxilMinor)) { errors << "Validator version must be less than or equal to current " "internal version."; return 1; } if (major64 == 0 && minor64 != 0) { errors << "If validator major version is 0, minor version must also be 0."; return 1; } opts.ValVerMajor = (unsigned long)major64; opts.ValVerMinor = (unsigned long)minor64; } llvm::StringRef valSelectStr = Args.getLastArgValue(OPT_select_validator); if (!valSelectStr.empty()) { opts.SelectValidator = llvm::StringSwitch<ValidatorSelection>(valSelectStr) .Case("auto", ValidatorSelection::Auto) .Case("internal", ValidatorSelection::Internal) .Case("external", ValidatorSelection::External) .Default(ValidatorSelection::Invalid); if (opts.SelectValidator == ValidatorSelection::Invalid) { errors << "Unsupported value '" << valSelectStr << "for -select-validator option."; return 1; } } if (opts.IsLibraryProfile() && Minor == 0xF) { if (opts.ValVerMajor != UINT_MAX && opts.ValVerMajor != 0) { errors << "Offline library profile cannot be used with non-zero " "-validator-version."; return 1; } // Disable validation for offline link only target opts.DisableValidation = true; // ValVerMajor == 0 means that the module is 
not meant to ever be validated. opts.ValVerMajor = 0; opts.ValVerMinor = 0; } // These targets are only useful as an intermediate step towards linking to // matching shader targets without going through target downgrading at link // time. Disable lib_6_1 and lib_6_2 if /Vd is not present if (opts.IsLibraryProfile() && (Major < 6 || (Major == 6 && Minor < 3))) { if (!opts.DisableValidation) { errors << "Must disable validation for unsupported lib_6_1 or lib_6_2 " "targets."; return 1; } if (opts.ValVerMajor != UINT_MAX && opts.ValVerMajor != 0) { errors << "non-zero -validator-version cannot be used with library " "profiles lib_6_1 or lib_6_2."; return 1; } // ValVerMajor == 0 means that the module is not meant to ever be validated. opts.ValVerMajor = 0; opts.ValVerMinor = 0; } if (opts.KeepReflectionInDxil && opts.StripReflectionFromDxil) { errors << "-Qstrip_reflect_from_dxil mutually exclusive with " "-Qkeep_reflect_in_dxil."; return 1; } addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, opts.Warnings); // SPIRV Change Starts #ifdef ENABLE_SPIRV_CODEGEN opts.GenSPIRV = Args.hasFlag(OPT_spirv, OPT_INVALID, false); opts.SpirvOptions.invertY = Args.hasFlag(OPT_fvk_invert_y, OPT_INVALID, false); opts.SpirvOptions.invertW = Args.hasFlag(OPT_fvk_use_dx_position_w, OPT_INVALID, false); opts.SpirvOptions.supportNonzeroBaseInstance = Args.hasFlag(OPT_fvk_support_nonzero_base_instance, OPT_INVALID, false); opts.SpirvOptions.supportNonzeroBaseVertex = Args.hasFlag(OPT_fvk_support_nonzero_base_vertex, OPT_INVALID, false); opts.SpirvOptions.useGlLayout = Args.hasFlag(OPT_fvk_use_gl_layout, OPT_INVALID, false); opts.SpirvOptions.useDxLayout = Args.hasFlag(OPT_fvk_use_dx_layout, OPT_INVALID, false); opts.SpirvOptions.useScalarLayout = Args.hasFlag(OPT_fvk_use_scalar_layout, OPT_INVALID, false); opts.SpirvOptions.useLegacyBufferMatrixOrder = Args.hasFlag(OPT_fspv_use_legacy_buffer_matrix_order, OPT_INVALID, false); opts.SpirvOptions.enableReflect = Args.hasFlag(OPT_fspv_reflect, OPT_INVALID, false); opts.SpirvOptions.noWarnIgnoredFeatures = Args.hasFlag(OPT_Wno_vk_ignored_features, OPT_INVALID, false); opts.SpirvOptions.noWarnEmulatedFeatures = Args.hasFlag(OPT_Wno_vk_emulated_features, OPT_INVALID, false); opts.SpirvOptions.flattenResourceArrays = Args.hasFlag(OPT_fspv_flatten_resource_arrays, OPT_INVALID, false); opts.SpirvOptions.reduceLoadSize = Args.hasFlag(OPT_fspv_reduce_load_size, OPT_INVALID, false); opts.SpirvOptions.fixFuncCallArguments = Args.hasFlag(OPT_fspv_fix_func_call_arguments, OPT_INVALID, false); opts.SpirvOptions.autoShiftBindings = Args.hasFlag(OPT_fvk_auto_shift_bindings, OPT_INVALID, false); opts.SpirvOptions.finiteMathOnly = Args.hasFlag(OPT_ffinite_math_only, OPT_fno_finite_math_only, false); opts.SpirvOptions.preserveBindings = Args.hasFlag(OPT_fspv_preserve_bindings, OPT_INVALID, false); opts.SpirvOptions.preserveInterface = Args.hasFlag(OPT_fspv_preserve_interface, OPT_INVALID, false); opts.SpirvOptions.allowRWStructuredBufferArrays = Args.hasFlag(OPT_fvk_allow_rwstructuredbuffer_arrays, OPT_INVALID, false); opts.SpirvOptions.enableMaximalReconvergence = Args.hasFlag(OPT_fspv_enable_maximal_reconvergence, OPT_INVALID, false); opts.SpirvOptions.useVulkanMemoryModel = Args.hasFlag(OPT_fspv_use_vulkan_memory_model, OPT_INVALID, false); if (!handleVkShiftArgs(Args, OPT_fvk_b_shift, "b", &opts.SpirvOptions.bShift, errors) || !handleVkShiftArgs(Args, OPT_fvk_t_shift, "t", &opts.SpirvOptions.tShift, errors) || !handleVkShiftArgs(Args, OPT_fvk_s_shift, "s", 
&opts.SpirvOptions.sShift, errors) || !handleVkShiftArgs(Args, OPT_fvk_u_shift, "u", &opts.SpirvOptions.uShift, errors)) return 1; opts.SpirvOptions.bindRegister = Args.getAllArgValues(OPT_fvk_bind_register); opts.SpirvOptions.bindGlobals = Args.getAllArgValues(OPT_fvk_bind_globals); opts.SpirvOptions.stageIoOrder = Args.getLastArgValue(OPT_fvk_stage_io_order_EQ, "decl"); if (opts.SpirvOptions.stageIoOrder != "alpha" && opts.SpirvOptions.stageIoOrder != "decl") { errors << "unknown Vulkan stage I/O location assignment order: " << opts.SpirvOptions.stageIoOrder; return 1; } for (const Arg *A : Args.filtered(OPT_fspv_extension_EQ)) { opts.SpirvOptions.allowedExtensions.push_back(A->getValue()); } opts.SpirvOptions.printAll = Args.hasFlag(OPT_fspv_print_all, OPT_INVALID, false); opts.SpirvOptions.debugInfoFile = opts.SpirvOptions.debugInfoSource = false; opts.SpirvOptions.debugInfoLine = opts.SpirvOptions.debugInfoTool = false; opts.SpirvOptions.debugInfoRich = false; opts.SpirvOptions.debugInfoVulkan = false; opts.SpirvOptions.debugSourceLen = kDefaultMaximumSourceLength; if (Args.hasArg(OPT_fspv_debug_EQ)) { opts.DebugInfo = true; for (const Arg *A : Args.filtered(OPT_fspv_debug_EQ)) { const llvm::StringRef v = A->getValue(); if (v == "file") { opts.SpirvOptions.debugInfoFile = true; } else if (v == "source") { opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = true; } else if (v == "line") { opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = true; opts.SpirvOptions.debugInfoLine = true; } else if (v == "tool") { opts.SpirvOptions.debugInfoTool = true; } else if (v == "rich") { opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = false; opts.SpirvOptions.debugInfoLine = true; opts.SpirvOptions.debugInfoRich = true; } else if (v == "rich-with-source") { opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = true; opts.SpirvOptions.debugInfoLine = true; opts.SpirvOptions.debugInfoRich = true; } else if (v == "vulkan") { // For test purposes only opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = false; opts.SpirvOptions.debugInfoLine = true; opts.SpirvOptions.debugInfoRich = true; opts.SpirvOptions.debugInfoVulkan = true; } else if (v == "vulkan-with-source") { opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = true; opts.SpirvOptions.debugInfoLine = true; opts.SpirvOptions.debugInfoRich = true; opts.SpirvOptions.debugInfoVulkan = true; } else if (v == "vulkan-with-source-test") { // For test purposes only opts.SpirvOptions.debugInfoFile = true; opts.SpirvOptions.debugInfoSource = true; opts.SpirvOptions.debugInfoLine = true; opts.SpirvOptions.debugInfoRich = true; opts.SpirvOptions.debugInfoVulkan = true; opts.SpirvOptions.debugSourceLen = kTestingMaximumSourceLength; } else { errors << "unknown SPIR-V debug info control parameter: " << v; return 1; } } } else if (opts.DebugInfo) { // By default turn on all categories opts.SpirvOptions.debugInfoFile = opts.SpirvOptions.debugInfoSource = true; opts.SpirvOptions.debugInfoLine = opts.SpirvOptions.debugInfoTool = true; } opts.SpirvOptions.targetEnv = Args.getLastArgValue(OPT_fspv_target_env_EQ, "vulkan1.0"); llvm::APInt maxId; // 0X3FFFFF is the default value for -fspv-max-id because it is the largest // value that is guaranteed to be allowed in all Vulkan implementations. 
if (Args.getLastArgValue(OPT_fspv_max_id, "3FFFFF").getAsInteger(16, maxId)) { errors << "-fspv-max-id must be an integer in hexadecimal format"; } opts.SpirvOptions.maxId = maxId.getLimitedValue(0xFFFFFFFF); // Handle -Oconfig=<comma-separated-list> option. uint32_t numOconfigs = 0; for (const Arg *A : Args.filtered(OPT_Oconfig)) { ++numOconfigs; if (numOconfigs > 1) { errors << "-Oconfig should not be specified more than once"; return 1; } if (Args.getLastArg(OPT_O0, OPT_O1, OPT_O2, OPT_O3)) { errors << "-Oconfig should not be used together with -O"; return 1; } for (const auto v : A->getValues()) { opts.SpirvOptions.optConfig.push_back(v); } } opts.SpirvOptions.entrypointName = Args.getLastArgValue(OPT_fspv_entrypoint_name_EQ); // Check for use of options not implemented in the SPIR-V backend. if (Args.hasFlag(OPT_spirv, OPT_INVALID, false) && hasUnsupportedSpirvOption(Args, errors)) return 1; opts.SpirvOptions.floatDenormalMode = Args.getLastArgValue(OPT_denorm); #else if (Args.hasFlag(OPT_spirv, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_invert_y, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_use_dx_position_w, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_support_nonzero_base_instance, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_use_gl_layout, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_use_dx_layout, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_use_scalar_layout, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_use_legacy_buffer_matrix_order, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_flatten_resource_arrays, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_reduce_load_size, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_reflect, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_fix_func_call_arguments, OPT_INVALID, false) || Args.hasFlag(OPT_fspv_print_all, OPT_INVALID, false) || Args.hasFlag(OPT_Wno_vk_ignored_features, OPT_INVALID, false) || Args.hasFlag(OPT_Wno_vk_emulated_features, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_auto_shift_bindings, OPT_INVALID, false) || Args.hasFlag(OPT_fvk_allow_rwstructuredbuffer_arrays, OPT_INVALID, false) || !Args.getLastArgValue(OPT_fvk_stage_io_order_EQ).empty() || !Args.getLastArgValue(OPT_fspv_debug_EQ).empty() || !Args.getLastArgValue(OPT_fspv_extension_EQ).empty() || !Args.getLastArgValue(OPT_fspv_target_env_EQ).empty() || !Args.getLastArgValue(OPT_Oconfig).empty() || !Args.getLastArgValue(OPT_fvk_bind_register).empty() || !Args.getLastArgValue(OPT_fvk_bind_globals).empty() || !Args.getLastArgValue(OPT_fvk_b_shift).empty() || !Args.getLastArgValue(OPT_fvk_t_shift).empty() || !Args.getLastArgValue(OPT_fvk_s_shift).empty() || !Args.getLastArgValue(OPT_fvk_u_shift).empty()) { errors << "SPIR-V CodeGen not available. " "Please recompile with -DENABLE_SPIRV_CODEGEN=ON."; return 1; } #endif // ENABLE_SPIRV_CODEGEN // SPIRV Change Ends // Validation for DebugInfo here because spirv uses same DebugInfo opt, // and legacy wrappers will add EmbedDebug in this case, leading to this // failing if placed before spirv path sets DebugInfo to true. 
if (opts.EmbedDebug && !opts.DebugInfo) { errors << "Must enable debug info with /Zi for /Qembed_debug"; return 1; } if (opts.DebugInfo && opts.SourceOnlyDebug) { errors << "Cannot specify both /Zi and /Zs"; return 1; } if (opts.SourceInDebugModule && opts.SourceOnlyDebug) { errors << "Cannot specify both /Qsource_in_debug_module and /Zs"; return 1; } if (opts.DebugInfo && !opts.DebugNameForBinary && !opts.DebugNameForSource) { opts.DebugNameForBinary = true; } else if (opts.DebugNameForBinary && opts.DebugNameForSource) { errors << "Cannot specify both /Zss and /Zsb"; return 1; } if (opts.DebugNameForSource && (!opts.DebugInfo && !opts.SourceOnlyDebug)) { errors << "/Zss requires debug info (/Zi or /Zs)"; return 1; } // Rewriter Options if (flagsToInclude & hlsl::options::RewriteOption) { opts.RWOpt.Unchanged = Args.hasFlag(OPT_rw_unchanged, OPT_INVALID, false); opts.RWOpt.SkipFunctionBody = Args.hasFlag(OPT_rw_skip_function_body, OPT_INVALID, false); opts.RWOpt.SkipStatic = Args.hasFlag(OPT_rw_skip_static, OPT_INVALID, false); opts.RWOpt.GlobalExternByDefault = Args.hasFlag(OPT_rw_global_extern_by_default, OPT_INVALID, false); opts.RWOpt.KeepUserMacro = Args.hasFlag(OPT_rw_keep_user_macro, OPT_INVALID, false); opts.RWOpt.ExtractEntryUniforms = Args.hasFlag(OPT_rw_extract_entry_uniforms, OPT_INVALID, false); opts.RWOpt.RemoveUnusedGlobals = Args.hasFlag(OPT_rw_remove_unused_globals, OPT_INVALID, false); opts.RWOpt.RemoveUnusedFunctions = Args.hasFlag(OPT_rw_remove_unused_functions, OPT_INVALID, false); opts.RWOpt.WithLineDirective = Args.hasFlag(OPT_rw_line_directive, OPT_INVALID, false); opts.RWOpt.DeclGlobalCB = Args.hasFlag(OPT_rw_decl_global_cb, OPT_INVALID, false); if (opts.EntryPoint.empty() && (opts.RWOpt.RemoveUnusedGlobals || opts.RWOpt.ExtractEntryUniforms || opts.RWOpt.RemoveUnusedFunctions)) { errors << "-remove-unused-globals, -remove-unused-functions and " "-extract-entry-uniforms requires entry point (-E) to be " "specified."; return 1; } } opts.Args = std::move(Args); return 0; } /// Sets up the specified DxcDllSupport instance as per the given options. 
int SetupDxcDllSupport(const DxcOpts &opts, dxc::DxcDllSupport &dxcSupport, llvm::raw_ostream &errors) { if (!opts.ExternalLib.empty()) { DXASSERT(!opts.ExternalFn.empty(), "else ReadDxcOpts should have failed"); HRESULT hrLoad = dxcSupport.InitializeForDll(opts.ExternalLib.data(), opts.ExternalFn.data()); if (DXC_FAILED(hrLoad)) { errors << "Unable to load support for external DLL " << opts.ExternalLib << " with function " << opts.ExternalFn << " - error 0x"; errors.write_hex(hrLoad); return 1; } } return 0; } void CopyArgsToWStrings(const InputArgList &inArgs, unsigned flagsToInclude, std::vector<std::wstring> &outArgs) { ArgStringList stringList; for (const Arg *A : inArgs) { if (A->getOption().hasFlag(flagsToInclude)) { A->renderAsInput(inArgs, stringList); } } for (const char *argText : stringList) { outArgs.emplace_back(Unicode::UTF8ToWideStringOrThrow(argText)); } } SerializeDxilFlags ComputeSerializeDxilFlags(const options::DxcOpts &opts) { SerializeDxilFlags SerializeFlags = SerializeDxilFlags::None; if (opts.EmbedPDBName()) { SerializeFlags |= SerializeDxilFlags::IncludeDebugNamePart; } if (opts.EmbedDebugInfo()) { SerializeFlags |= SerializeDxilFlags::IncludeDebugInfoPart; } if (opts.DebugNameForSource) { // Implies name part SerializeFlags |= SerializeDxilFlags::IncludeDebugNamePart; SerializeFlags |= SerializeDxilFlags::DebugNameDependOnSource; } else if (opts.DebugNameForBinary) { // Implies name part SerializeFlags |= SerializeDxilFlags::IncludeDebugNamePart; } if (!opts.KeepReflectionInDxil) { SerializeFlags |= SerializeDxilFlags::StripReflectionFromDxilPart; } if (!opts.StripReflection) { SerializeFlags |= SerializeDxilFlags::IncludeReflectionPart; } if (opts.StripRootSignature) { SerializeFlags |= SerializeDxilFlags::StripRootSignature; } return SerializeFlags; } } // namespace options } // namespace hlsl
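A minimal sketch of how a caller might combine two of the helpers defined above, SetupDxcDllSupport and ComputeSerializeDxilFlags. It is illustrative only: it assumes DxcOpts is default-constructible, that these helpers are declared in dxc/Support/HLSLOptions.h, and that dxc::DxcDllSupport comes from dxc/Support/dxcapi.use.h. In a real driver the opts structure is filled in by the option parser (ReadDxcOpts) rather than by hand.

#include "dxc/Support/HLSLOptions.h"  // assumed to declare DxcOpts and the helpers above
#include "dxc/Support/dxcapi.use.h"   // dxc::DxcDllSupport
#include "llvm/Support/raw_ostream.h"

int SerializeWithParsedOptions() {
  // Normally produced by the option parser (ReadDxcOpts); populated by hand
  // here purely for illustration.
  hlsl::options::DxcOpts opts;
  opts.DebugNameForSource = true;    // /Zss: debug name depends on source
  opts.KeepReflectionInDxil = false; // reflection is stripped from the DXIL part
  opts.StripRootSignature = false;   // keep the root signature part

  // Loads an external DLL only when opts.ExternalLib / opts.ExternalFn were
  // specified; returns non-zero and writes a diagnostic on failure.
  dxc::DxcDllSupport dxcSupport;
  if (hlsl::options::SetupDxcDllSupport(opts, dxcSupport, llvm::errs()) != 0)
    return 1;

  // Maps the boolean options onto DXIL container serialization flags, e.g.
  // DebugNameForSource implies IncludeDebugNamePart and DebugNameDependOnSource.
  auto serializeFlags = hlsl::options::ComputeSerializeDxilFlags(opts);
  (void)serializeFlags;
  return 0;
}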
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/CMakeLists.txt
# Copyright (C) Microsoft Corporation. All rights reserved.
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.

add_llvm_library(LLVMDxcSupport
  dxcapi.use.cpp
  dxcmem.cpp
  FileIOHelper.cpp
  Global.cpp
  HLSLOptions.cpp
  Unicode.cpp
  WinAdapter.cpp
  WinIncludes.cpp
  WinFunctions.cpp
  )

# generate header with platform-specific library name
configure_file(
  ${LLVM_MAIN_SRC_DIR}/lib/DxcSupport/SharedLibAffix.inc
  ${LLVM_INCLUDE_DIR}/dxc/Support/SharedLibAffix.h)

target_link_libraries(LLVMDxcSupport PUBLIC LLVMSupport)

add_dependencies(LLVMDxcSupport LLVMSupport TablegenHLSLOptions)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/LLVMBuild.txt
;===- ./lib/DxcSupport/LLVMBuild.txt ---------------------------*- Conf -*--===;
;
;                     The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = DxcSupport
parent = Libraries
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/Unicode.cpp
/////////////////////////////////////////////////////////////////////////////// // // // Unicode.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides utitlity functions to work with Unicode and other encodings. // // // /////////////////////////////////////////////////////////////////////////////// #ifdef _WIN32 #include <specstrings.h> #else #include <clocale> #endif #include "dxc/Support/Global.h" #include "dxc/Support/Unicode.h" #include "dxc/Support/WinIncludes.h" #include <assert.h> #include <string> #ifndef _WIN32 // MultiByteToWideChar which is a Windows-specific method. // This is a very simplistic implementation for non-Windows platforms. This // implementation completely ignores CodePage and dwFlags. int MultiByteToWideChar(uint32_t /*CodePage*/, uint32_t /*dwFlags*/, const char *lpMultiByteStr, int cbMultiByte, wchar_t *lpWideCharStr, int cchWideChar) { if (cbMultiByte == 0) { SetLastError(ERROR_INVALID_PARAMETER); return 0; } // if cbMultiByte is -1, it indicates that lpMultiByteStr is null-terminated // and the entire string should be processed. if (cbMultiByte == -1) { for (cbMultiByte = 0; lpMultiByteStr[cbMultiByte] != '\0'; ++cbMultiByte) ; // Add 1 for the null-terminating character. ++cbMultiByte; } // If zero is given as the destination size, this function should // return the required size (including the null-terminating character). // This is the behavior of mbstowcs when the target is null. if (cchWideChar == 0) { lpWideCharStr = nullptr; } else if (cchWideChar < cbMultiByte) { SetLastError(ERROR_INSUFFICIENT_BUFFER); return 0; } size_t rv; const char *prevLocale = setlocale(LC_ALL, nullptr); setlocale(LC_ALL, "en_US.UTF-8"); if (lpMultiByteStr[cbMultiByte - 1] != '\0') { char *srcStr = (char *)malloc((cbMultiByte + 1) * sizeof(char)); strncpy(srcStr, lpMultiByteStr, cbMultiByte); srcStr[cbMultiByte] = '\0'; rv = mbstowcs(lpWideCharStr, srcStr, cchWideChar); free(srcStr); } else { rv = mbstowcs(lpWideCharStr, lpMultiByteStr, cchWideChar); } if (prevLocale) setlocale(LC_ALL, prevLocale); if (rv == (size_t)cbMultiByte) return rv; return rv + 1; // mbstowcs excludes the terminating character } // WideCharToMultiByte is a Windows-specific method. // This is a very simplistic implementation for non-Windows platforms. This // implementation completely ignores CodePage and dwFlags. int WideCharToMultiByte(uint32_t /*CodePage*/, uint32_t /*dwFlags*/, const wchar_t *lpWideCharStr, int cchWideChar, char *lpMultiByteStr, int cbMultiByte, const char * /*lpDefaultChar*/, bool *lpUsedDefaultChar) { if (lpUsedDefaultChar) { *lpUsedDefaultChar = FALSE; } if (cchWideChar == 0) { SetLastError(ERROR_INVALID_PARAMETER); return 0; } // if cchWideChar is -1, it indicates that lpWideCharStr is null-terminated // and the entire string should be processed. if (cchWideChar == -1) { for (cchWideChar = 0; lpWideCharStr[cchWideChar] != '\0'; ++cchWideChar) ; // Add 1 for the null-terminating character. ++cchWideChar; } // If zero is given as the destination size, this function should // return the required size (including the null-terminating character). // This is the behavior of wcstombs when the target is null. 
if (cbMultiByte == 0) { lpMultiByteStr = nullptr; } else if (cbMultiByte < cchWideChar) { SetLastError(ERROR_INSUFFICIENT_BUFFER); return 0; } size_t rv; const char *prevLocale = setlocale(LC_ALL, nullptr); setlocale(LC_ALL, "en_US.UTF-8"); if (lpWideCharStr[cchWideChar - 1] != L'\0') { wchar_t *srcStr = (wchar_t *)malloc((cchWideChar + 1) * sizeof(wchar_t)); wcsncpy(srcStr, lpWideCharStr, cchWideChar); srcStr[cchWideChar] = L'\0'; rv = wcstombs(lpMultiByteStr, srcStr, cbMultiByte); free(srcStr); } else { rv = wcstombs(lpMultiByteStr, lpWideCharStr, cbMultiByte); } if (prevLocale) setlocale(LC_ALL, prevLocale); if (rv == (size_t)cchWideChar) return rv; return rv + 1; // mbstowcs excludes the terminating character } #endif // _WIN32 namespace Unicode { bool WideToEncodedString(const wchar_t *text, size_t cWide, DWORD cp, DWORD flags, std::string *pValue, bool *lossy) { BOOL usedDefaultChar; LPBOOL pUsedDefaultChar = (lossy == nullptr) ? nullptr : &usedDefaultChar; if (lossy != nullptr) *lossy = false; // Handle zero-length as a special case; it's a special value to indicate // errors in WideCharToMultiByte. if (cWide == 0) { pValue->resize(0); DXASSERT(lossy == nullptr || *lossy == false, "otherwise earlier initialization in this function was updated"); return true; } int cbUTF8 = ::WideCharToMultiByte(cp, flags, text, cWide, nullptr, 0, nullptr, pUsedDefaultChar); if (cbUTF8 == 0) return false; pValue->resize(cbUTF8); cbUTF8 = ::WideCharToMultiByte(cp, flags, text, cWide, &(*pValue)[0], pValue->size(), nullptr, pUsedDefaultChar); DXASSERT(cbUTF8 > 0, "otherwise contents have changed"); DXASSERT((*pValue)[pValue->size()] == '\0', "otherwise string didn't null-terminate after resize() call"); if (lossy != nullptr) *lossy = usedDefaultChar; return true; } bool UTF8ToWideString(const char *pUTF8, std::wstring *pWide) { size_t cbUTF8 = (pUTF8 == nullptr) ? 0 : strlen(pUTF8); return UTF8ToWideString(pUTF8, cbUTF8, pWide); } bool UTF8ToWideString(const char *pUTF8, size_t cbUTF8, std::wstring *pWide) { DXASSERT_NOMSG(pWide != nullptr); // Handle zero-length as a special case; it's a special value to indicate // errors in MultiByteToWideChar. 
if (cbUTF8 == 0) { pWide->resize(0); return true; } int cWide = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, cbUTF8, nullptr, 0); if (cWide == 0) return false; pWide->resize(cWide); cWide = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, cbUTF8, &(*pWide)[0], pWide->size()); DXASSERT(cWide > 0, "otherwise contents changed"); DXASSERT((*pWide)[pWide->size()] == L'\0', "otherwise wstring didn't null-terminate after resize() call"); return true; } std::wstring UTF8ToWideStringOrThrow(const char *pUTF8) { std::wstring result; if (!UTF8ToWideString(pUTF8, &result)) { throw hlsl::Exception(DXC_E_STRING_ENCODING_FAILED); } return result; } bool UTF8ToConsoleString(const char *text, size_t textLen, std::string *pValue, bool *lossy) { DXASSERT_NOMSG(text != nullptr); DXASSERT_NOMSG(pValue != nullptr); std::wstring text16; if (lossy != nullptr) *lossy = false; if (!UTF8ToWideString(text, textLen, &text16)) { return false; } return WideToConsoleString(text16.c_str(), text16.length(), pValue, lossy); } bool UTF8ToConsoleString(const char *text, std::string *pValue, bool *lossy) { return UTF8ToConsoleString(text, strlen(text), pValue, lossy); } bool WideToConsoleString(const wchar_t *text, size_t textLen, std::string *pValue, bool *lossy) { DXASSERT_NOMSG(text != nullptr); DXASSERT_NOMSG(pValue != nullptr); UINT cp = GetConsoleOutputCP(); return WideToEncodedString(text, textLen, cp, 0, pValue, lossy); } bool WideToConsoleString(const wchar_t *text, std::string *pValue, bool *lossy) { return WideToConsoleString(text, wcslen(text), pValue, lossy); } bool WideToUTF8String(const wchar_t *pWide, size_t cWide, std::string *pUTF8) { DXASSERT_NOMSG(pWide != nullptr); DXASSERT_NOMSG(pUTF8 != nullptr); return WideToEncodedString(pWide, cWide, CP_UTF8, 0, pUTF8, nullptr); } bool WideToUTF8String(const wchar_t *pWide, std::string *pUTF8) { DXASSERT_NOMSG(pWide != nullptr); DXASSERT_NOMSG(pUTF8 != nullptr); return WideToEncodedString(pWide, wcslen(pWide), CP_UTF8, 0, pUTF8, nullptr); } std::string WideToUTF8StringOrThrow(const wchar_t *pWide) { std::string result; if (!WideToUTF8String(pWide, &result)) { throw hlsl::Exception(DXC_E_STRING_ENCODING_FAILED); } return result; } bool UTF8BufferToWideComHeap(const char *pUTF8, wchar_t **ppWide) throw() { *ppWide = nullptr; int c = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, -1, nullptr, 0); if (c == 0) return false; CComHeapPtr<wchar_t> p; if (!p.Allocate(c)) return false; DXVERIFY_NOMSG(0 < ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, -1, p.m_pData, c)); *ppWide = p.Detach(); return true; } bool UTF8BufferToWideBuffer(const char *pUTF8, int cbUTF8, wchar_t **ppWide, size_t *pcWide) throw() { *ppWide = nullptr; *pcWide = 0; if (cbUTF8 == 0 || (cbUTF8 == -1 && *pUTF8 == '\0')) { *ppWide = new (std::nothrow) wchar_t[1]; if (*ppWide == nullptr) return false; (*ppWide)[0] = L'\0'; *pcWide = 1; return true; } int c = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, cbUTF8, nullptr, 0); if (c == 0) return false; // add space for null-terminator if we're not accounting for it if (cbUTF8 != -1) c += 1; wchar_t *p = new (std::nothrow) wchar_t[c]; if (p == nullptr) return false; int converted = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, pUTF8, cbUTF8, p, c); (void)converted; DXASSERT(converted > 0, "otherwise contents have changed"); p[c - 1] = L'\0'; *ppWide = p; *pcWide = c; return true; } bool WideBufferToUTF8Buffer(const wchar_t *pWide, int cWide, char **ppUTF8, size_t *pcUTF8) throw() { *ppUTF8 = 
nullptr; *pcUTF8 = 0; if (cWide == 0 || (cWide == -1 && *pWide == '\0')) { *ppUTF8 = new (std::nothrow) char[1]; if (*ppUTF8 == nullptr) return false; (*ppUTF8)[0] = '\0'; *pcUTF8 = 1; return true; } int c1 = ::WideCharToMultiByte(CP_UTF8, // code page 0, // flags pWide, // string to convert cWide, // size, in chars, of string to convert nullptr, // output buffer 0, // size of output buffer nullptr, nullptr); if (c1 == 0) return false; // add space for null-terminator if we're not accounting for it if (cWide != -1) c1 += 1; char *p = new (std::nothrow) char[c1]; if (p == nullptr) return false; int converted = ::WideCharToMultiByte(CP_UTF8, 0, pWide, cWide, p, c1, nullptr, nullptr); (void)converted; DXASSERT(converted > 0, "otherwise contents have changed"); p[c1 - 1] = '\0'; *ppUTF8 = p; *pcUTF8 = c1; return true; } template <typename TChar> static bool IsStarMatchT(const TChar *pMask, size_t maskLen, const TChar *pName, size_t nameLen, TChar star) { if (maskLen == 0 && nameLen == 0) { return true; } if (maskLen == 0 || nameLen == 0) { return false; } if (pMask[maskLen - 1] == star) { // Prefix match. if (maskLen == 1) { // For just '*', everything is a match. return true; } --maskLen; if (maskLen > nameLen) { // Mask is longer than name, can't be a match. return false; } return 0 == memcmp(pMask, pName, sizeof(TChar) * maskLen); } else { // Exact match. if (nameLen != maskLen) { return false; } return 0 == memcmp(pMask, pName, sizeof(TChar) * nameLen); } } bool IsStarMatchUTF8(const char *pMask, size_t maskLen, const char *pName, size_t nameLen) { return IsStarMatchT<char>(pMask, maskLen, pName, nameLen, '*'); } bool IsStarMatchWide(const wchar_t *pMask, size_t maskLen, const wchar_t *pName, size_t nameLen) { return IsStarMatchT<wchar_t>(pMask, maskLen, pName, nameLen, L'*'); } } // namespace Unicode
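A small usage sketch for the conversion and matching helpers defined above. The functions and signatures are the ones in this file (declared via dxc/Support/Unicode.h, per the include at the top); the "VS*" mask is just an example value.

#include "dxc/Support/Unicode.h"
#include <string>

// Round-trips a UTF-8 name through the wide-string helpers and then applies
// the simple "prefix*" / exact-match rule implemented by IsStarMatchT.
bool RoundTripAndMatch(const char *utf8Name) {
  std::wstring wide;
  if (!Unicode::UTF8ToWideString(utf8Name, &wide))
    return false; // conversion failed (e.g. invalid UTF-8)

  std::string utf8Again;
  if (!Unicode::WideToUTF8String(wide.c_str(), &utf8Again))
    return false;

  const char mask[] = "VS*"; // trailing '*' makes this a prefix match
  return Unicode::IsStarMatchUTF8(mask, sizeof(mask) - 1, utf8Again.c_str(),
                                  utf8Again.size());
}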
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/SharedLibAffix.inc
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// SharedLibAffix.inc                                                        //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source     //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Defines shared library prefixes and suffixes for the build platform.      //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#pragma once

#cmakedefine CMAKE_SHARED_LIBRARY_PREFIX "@CMAKE_SHARED_LIBRARY_PREFIX@"
#cmakedefine CMAKE_SHARED_LIBRARY_SUFFIX "@CMAKE_SHARED_LIBRARY_SUFFIX@"

#ifndef CMAKE_SHARED_LIBRARY_PREFIX
#define CMAKE_SHARED_LIBRARY_PREFIX
#endif
#ifndef CMAKE_SHARED_LIBRARY_SUFFIX
#define CMAKE_SHARED_LIBRARY_SUFFIX
#endif
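The header generated from this file by the configure_file() call in the CMakeLists above is concatenation-friendly: both macros expand to string literals, or to nothing when CMake did not provide a value, so a platform-specific library file name can be composed at compile time. A minimal sketch, with "dxcompiler" as an illustrative base name:

#include "dxc/Support/SharedLibAffix.h" // generated from SharedLibAffix.inc

// Expands to e.g. "libdxcompiler.so" on Linux or "dxcompiler.dll" on Windows,
// depending on the CMake-provided prefix/suffix; "dxcompiler" is illustrative.
static const char kCompilerLibName[] =
    CMAKE_SHARED_LIBRARY_PREFIX "dxcompiler" CMAKE_SHARED_LIBRARY_SUFFIX;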
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/WinFunctions.cpp
//===-- WinFunctions.cpp - Windows Functions for other platforms --*- C++ //-*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines Windows-specific functions used in the codebase for // non-Windows platforms. // //===----------------------------------------------------------------------===// #ifndef _WIN32 #include <fcntl.h> #include <map> #include <string.h> #include <sys/stat.h> #include <unistd.h> #include "dxc/Support/WinFunctions.h" #include "dxc/Support/microcom.h" HRESULT StringCchPrintfA(char *dst, size_t dstSize, const char *format, ...) { va_list args; va_start(args, format); va_list argscopy; va_copy(argscopy, args); // C++11 snprintf can return the size of the resulting string if it was to be // constructed. size_t size = vsnprintf(nullptr, 0, format, argscopy) + 1; // Extra space for '\0' if (size > dstSize) { *dst = '\0'; } else { vsnprintf(dst, size, format, args); } va_end(argscopy); va_end(args); return S_OK; } HRESULT UIntAdd(UINT uAugend, UINT uAddend, UINT *puResult) { HRESULT hr; if ((uAugend + uAddend) >= uAugend) { *puResult = (uAugend + uAddend); hr = S_OK; } else { *puResult = 0xffffffff; hr = ERROR_ARITHMETIC_OVERFLOW; } return hr; } HRESULT IntToUInt(int in, UINT *out) { HRESULT hr; if (in >= 0) { *out = (UINT)in; hr = S_OK; } else { *out = 0xffffffff; hr = ERROR_ARITHMETIC_OVERFLOW; } return hr; } HRESULT SizeTToInt(size_t in, int *out) { HRESULT hr; if (in <= INT_MAX) { *out = (int)in; hr = S_OK; } else { *out = 0xffffffff; hr = ERROR_ARITHMETIC_OVERFLOW; } return hr; } HRESULT UInt32Mult(UINT a, UINT b, UINT *out) { uint64_t result = (uint64_t)a * (uint64_t)b; if (result > uint64_t(UINT_MAX)) return ERROR_ARITHMETIC_OVERFLOW; *out = (uint32_t)result; return S_OK; } int strnicmp(const char *str1, const char *str2, size_t count) { size_t i = 0; for (; i < count && str1[i] && str2[i]; ++i) { int d = std::tolower(str1[i]) - std::tolower(str2[i]); if (d != 0) return d; } if (i == count) { // All 'count' characters matched. return 0; } // str1 or str2 reached NULL before 'count' characters were compared. 
return str1[i] - str2[i]; } int _stricmp(const char *str1, const char *str2) { size_t i = 0; for (; str1[i] && str2[i]; ++i) { int d = std::tolower(str1[i]) - std::tolower(str2[i]); if (d != 0) return d; } return str1[i] - str2[i]; } int _wcsicmp(const wchar_t *str1, const wchar_t *str2) { size_t i = 0; for (; str1[i] && str2[i]; ++i) { int d = std::towlower(str1[i]) - std::towlower(str2[i]); if (d != 0) return d; } return str1[i] - str2[i]; } int _wcsnicmp(const wchar_t *str1, const wchar_t *str2, size_t n) { size_t i = 0; for (; i < n && str1[i] && str2[i]; ++i) { int d = std::towlower(str1[i]) - std::towlower(str2[i]); if (d != 0) return d; } if (i >= n) return 0; return str1[i] - str2[i]; } unsigned char _BitScanForward(unsigned long *Index, unsigned long Mask) { unsigned long l; if (!Mask) return 0; for (l = 0; !(Mask & 1); l++) Mask >>= 1; *Index = l; return 1; } HANDLE CreateFile2(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, DWORD dwCreationDisposition, void *pCreateExParams) { return CreateFileW(lpFileName, dwDesiredAccess, dwShareMode, pCreateExParams, dwCreationDisposition, FILE_ATTRIBUTE_NORMAL, nullptr); } HANDLE CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, void *lpSecurityAttributes, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes, HANDLE hTemplateFile) { CW2A pUtf8FileName(lpFileName); size_t fd = -1; int flags = 0; if (dwDesiredAccess & GENERIC_WRITE) if (dwDesiredAccess & GENERIC_READ) flags |= O_RDWR; else flags |= O_WRONLY; else // dwDesiredAccess may be 0, but open() demands something here. This is // mostly harmless flags |= O_RDONLY; if (dwCreationDisposition == CREATE_ALWAYS) flags |= (O_CREAT | O_TRUNC); if (dwCreationDisposition == OPEN_ALWAYS) flags |= O_CREAT; else if (dwCreationDisposition == CREATE_NEW) flags |= (O_CREAT | O_EXCL); else if (dwCreationDisposition == TRUNCATE_EXISTING) flags |= O_TRUNC; // OPEN_EXISTING represents default open() behavior // Catch Implementation limitations. 
assert(!lpSecurityAttributes && "security attributes not supported in CreateFileW yet"); assert(!hTemplateFile && "template file not supported in CreateFileW yet"); assert(dwFlagsAndAttributes == FILE_ATTRIBUTE_NORMAL && "Attributes other than NORMAL not supported in CreateFileW yet"); while ((int)(fd = open(pUtf8FileName, flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) < 0) { if (errno != EINTR) return INVALID_HANDLE_VALUE; } return (HANDLE)fd; } BOOL GetFileSizeEx(HANDLE hFile, PLARGE_INTEGER lpFileSize) { int fd = (size_t)hFile; struct stat fdstat; int rv = fstat(fd, &fdstat); if (!rv) { lpFileSize->QuadPart = (LONGLONG)fdstat.st_size; return true; } return false; } BOOL ReadFile(HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead, void *lpOverlapped) { size_t fd = (size_t)hFile; ssize_t rv = -1; // Implementation limitation assert(!lpOverlapped && "Overlapping not supported in ReadFile yet."); rv = read(fd, lpBuffer, nNumberOfBytesToRead); if (rv < 0) return false; *lpNumberOfBytesRead = rv; return true; } BOOL WriteFile(HANDLE hFile, LPCVOID lpBuffer, DWORD nNumberOfBytesToWrite, LPDWORD lpNumberOfBytesWritten, void *lpOverlapped) { size_t fd = (size_t)hFile; ssize_t rv = -1; // Implementation limitation assert(!lpOverlapped && "Overlapping not supported in WriteFile yet."); rv = write(fd, lpBuffer, nNumberOfBytesToWrite); if (rv < 0) return false; *lpNumberOfBytesWritten = rv; return true; } BOOL CloseHandle(HANDLE hObject) { int fd = (size_t)hObject; return !close(fd); } // Half-hearted implementation of a heap structure // Enables size queries, maximum allocation limit, and collective free at heap // destruction Does not perform any preallocation or allocation organization. // Does not respect any flags except for HEAP_ZERO_MEMORY struct SimpleAllocation { LPVOID ptr; SIZE_T size; }; struct SimpleHeap { std::map<LPCVOID, SimpleAllocation> allocs; SIZE_T maxSize, curSize; }; HANDLE HeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize) { SimpleHeap *simpHeap = new SimpleHeap; simpHeap->maxSize = dwMaximumSize; simpHeap->curSize = 0; return (HANDLE)simpHeap; } BOOL HeapDestroy(HANDLE hHeap) { SimpleHeap *simpHeap = (SimpleHeap *)hHeap; for (auto it = simpHeap->allocs.begin(), e = simpHeap->allocs.end(); it != e; it++) free(it->second.ptr); delete simpHeap; return true; } LPVOID HeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes) { LPVOID ptr = nullptr; SimpleHeap *simpHeap = (SimpleHeap *)hHeap; if (simpHeap->maxSize && simpHeap->curSize + dwBytes > simpHeap->maxSize) return nullptr; if (dwFlags == HEAP_ZERO_MEMORY) ptr = calloc(1, dwBytes); else ptr = malloc(dwBytes); simpHeap->allocs[ptr] = {ptr, dwBytes}; simpHeap->curSize += dwBytes; return ptr; } LPVOID HeapReAlloc(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, SIZE_T dwBytes) { LPVOID ptr = nullptr; SimpleHeap *simpHeap = (SimpleHeap *)hHeap; SIZE_T oSize = simpHeap->allocs[lpMem].size; if (simpHeap->maxSize && simpHeap->curSize - oSize + dwBytes > simpHeap->maxSize) return nullptr; ptr = realloc(lpMem, dwBytes); if (dwFlags == HEAP_ZERO_MEMORY && oSize < dwBytes) memset((char *)ptr + oSize, 0, dwBytes - oSize); simpHeap->allocs.erase(lpMem); simpHeap->curSize -= oSize; simpHeap->allocs[ptr] = {ptr, dwBytes}; simpHeap->curSize += dwBytes; return ptr; } BOOL HeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) { SimpleHeap *simpHeap = (SimpleHeap *)hHeap; SIZE_T oSize = simpHeap->allocs[lpMem].size; free(lpMem); simpHeap->allocs.erase(lpMem); simpHeap->curSize -= oSize; return 
true; } SIZE_T HeapSize(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) { SimpleHeap *simpHeap = (SimpleHeap *)hHeap; return simpHeap->allocs[lpMem].size; } static SimpleHeap g_processHeap; HANDLE GetProcessHeap() { return (HANDLE)&g_processHeap; } #endif // _WIN32
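A short sketch exercising a few of the shims defined above. It is written against the non-Windows implementations in this file (on Windows the same names resolve to the native APIs), and assumes the declarations come from dxc/Support/WinFunctions.h and dxc/Support/WinIncludes.h as included at the top of the file.

#include "dxc/Support/WinIncludes.h"
#include "dxc/Support/WinFunctions.h"

bool ShimSmokeTest() {
  char buf[32];
  if (FAILED(StringCchPrintfA(buf, sizeof(buf), "dxil v%u.%u", 1u, 7u)))
    return false;

  if (_stricmp("VSMain", "vsmain") != 0) // case-insensitive compare
    return false;

  // The simple heap tracks every allocation and frees them all on destroy;
  // a maximum size of 0 means "unlimited" in this implementation.
  HANDLE heap = HeapCreate(/*flOptions*/ 0, /*initial*/ 0, /*maximum*/ 0);
  void *p = HeapAlloc(heap, HEAP_ZERO_MEMORY, 64);
  bool ok = (p != nullptr) && (HeapSize(heap, 0, p) == 64);
  HeapFree(heap, 0, p);
  HeapDestroy(heap);
  return ok;
}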
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/WinIncludes.cpp
//===- WinIncludes.cpp -----------------------------------------*- C++ -*-===// /////////////////////////////////////////////////////////////////////////////// // // // WinIncludes.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/WinIncludes.h" #include "assert.h" #include "dxc/Support/microcom.h" #if defined(_WIN32) && !defined(DXC_DISABLE_ALLOCATOR_OVERRIDES) // CoGetMalloc from combaseapi.h is used #else struct DxcCoMalloc : public IMalloc { DxcCoMalloc() : m_dwRef(0){}; DXC_MICROCOM_ADDREF_RELEASE_IMPL(m_dwRef) STDMETHODIMP QueryInterface(REFIID riid, void **ppvObject) override { assert(false && "QueryInterface not implemented for DxcCoMalloc."); return E_NOINTERFACE; } void *STDMETHODCALLTYPE Alloc(SIZE_T size) override { return malloc(size); } void *STDMETHODCALLTYPE Realloc(void *ptr, SIZE_T size) override { return realloc(ptr, size); } void STDMETHODCALLTYPE Free(void *ptr) override { free(ptr); } SIZE_T STDMETHODCALLTYPE GetSize(void *pv) override { return -1; } int STDMETHODCALLTYPE DidAlloc(void *pv) override { return -1; } void STDMETHODCALLTYPE HeapMinimize(void) override {} private: DXC_MICROCOM_REF_FIELD(m_dwRef) }; HRESULT DxcCoGetMalloc(DWORD dwMemContext, IMalloc **ppMalloc) { *ppMalloc = new DxcCoMalloc; (*ppMalloc)->AddRef(); return S_OK; } #endif
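The malloc/free-backed DxcCoMalloc above only implements meaningfully what its callers rely on: Alloc, Realloc and Free (its GetSize deliberately reports -1). A sketch of that contract from the caller's side, using only those methods on any IMalloc:

#include "dxc/Support/WinIncludes.h"
#include <cstring>

// Copies 'size' bytes into memory owned by the given allocator; the caller
// releases the copy with pMalloc->Free(*ppCopy).
HRESULT CopyIntoMalloc(IMalloc *pMalloc, const void *pData, SIZE_T size,
                       void **ppCopy) {
  *ppCopy = nullptr;
  void *p = pMalloc->Alloc(size);
  if (p == nullptr)
    return E_OUTOFMEMORY;
  std::memcpy(p, pData, size);
  *ppCopy = p;
  return S_OK;
}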
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcSupport/FileIOHelper.cpp
/////////////////////////////////////////////////////////////////////////////// // // // FileIOHelper.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // TODO: consider including an empty blob singleton (possibly UTF-8/16 too). // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/Unicode.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "dxc/Support/microcom.h" #include "dxc/dxcapi.h" #include <algorithm> #include <memory> #ifdef _WIN32 #include <intsafe.h> #endif // CP_UTF8 is defined in WinNls.h, but others we use are not defined there. // See matching value definitions here: // https://docs.microsoft.com/en-us/windows/win32/intl/code-page-identifiers // We detect all these through BOM. #define CP_UTF16LE 1200 #define CP_UTF16BE 1201 #define CP_UTF32LE 12000 #define CP_UTF32BE 12001 // Alias for CP_UTF16LE, which is the only one we actually handle. #define CP_UTF16 CP_UTF16LE struct HeapMalloc : public IMalloc { public: ULONG STDMETHODCALLTYPE AddRef() override { return 1; } ULONG STDMETHODCALLTYPE Release() override { return 1; } STDMETHODIMP QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IMalloc>(this, iid, ppvObject); } void *STDMETHODCALLTYPE Alloc(SIZE_T cb) override { return HeapAlloc(GetProcessHeap(), 0, cb); } void *STDMETHODCALLTYPE Realloc(void *pv, SIZE_T cb) override { return HeapReAlloc(GetProcessHeap(), 0, pv, cb); } void STDMETHODCALLTYPE Free(void *pv) override { HeapFree(GetProcessHeap(), 0, pv); } SIZE_T STDMETHODCALLTYPE GetSize(void *pv) override { return HeapSize(GetProcessHeap(), 0, pv); } int STDMETHODCALLTYPE DidAlloc(void *pv) override { return -1; // don't know } void STDMETHODCALLTYPE HeapMinimize(void) override {} }; static HeapMalloc g_HeapMalloc; namespace hlsl { IMalloc *GetGlobalHeapMalloc() throw() { return &g_HeapMalloc; } HRESULT ReadBinaryFile(IMalloc *pMalloc, LPCWSTR pFileName, void **ppData, DWORD *pDataSize) throw() { HANDLE hFile = CreateFileW(pFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); if (hFile == INVALID_HANDLE_VALUE) { return HRESULT_FROM_WIN32(GetLastError()); } CHandle h(hFile); LARGE_INTEGER FileSize; if (!GetFileSizeEx(hFile, &FileSize)) { return HRESULT_FROM_WIN32(GetLastError()); } if (FileSize.u.HighPart != 0) { return DXC_E_INPUT_FILE_TOO_LARGE; } char *pData = (char *)pMalloc->Alloc(FileSize.u.LowPart); if (!pData) { return E_OUTOFMEMORY; } DWORD BytesRead; if (!ReadFile(hFile, pData, FileSize.u.LowPart, &BytesRead, nullptr)) { HRESULT hr = HRESULT_FROM_WIN32(GetLastError()); pMalloc->Free(pData); return hr; } DXASSERT(FileSize.u.LowPart == BytesRead, "ReadFile operation failed"); *ppData = pData; *pDataSize = FileSize.u.LowPart; return S_OK; } HRESULT ReadBinaryFile(LPCWSTR pFileName, void **ppData, DWORD *pDataSize) throw() { return ReadBinaryFile(GetGlobalHeapMalloc(), pFileName, ppData, pDataSize); } HRESULT WriteBinaryFile(LPCWSTR pFileName, const void *pData, DWORD DataSize) throw() { HANDLE hFile = CreateFileW(pFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr); if (hFile == INVALID_HANDLE_VALUE) { return HRESULT_FROM_WIN32(GetLastError()); } CHandle h(hFile); DWORD BytesWritten; if (!WriteFile(hFile, 
pData, DataSize, &BytesWritten, nullptr)) { return HRESULT_FROM_WIN32(GetLastError()); } DXASSERT(DataSize == BytesWritten, "WriteFile operation failed"); return S_OK; } UINT32 DxcCodePageFromBytes(const char *bytes, size_t byteLen) throw() { UINT32 codePage; if (byteLen >= 4) { // Now try to use the BOM to check for Unicode encodings char bom[4] = {bytes[0], bytes[1], bytes[2], bytes[3]}; if (memcmp(bom, "\xef\xbb\xbf", 3) == 0) { codePage = CP_UTF8; } else if (byteLen > 4 && memcmp(bom, "\xff\xfe\x00\x00", 4) == 0) { // byteLen > 4 to avoid mistaking empty UTF-16 LE BOM + null-terminator // for UTF-32 LE BOM without null-terminator. // If it's an empty UTF-32 LE with no null-termination, // it's harmless to interpret as empty UTF-16 LE with null-termination. codePage = CP_UTF32LE; } else if (memcmp(bom, "\x00\x00\xfe\xff", 4) == 0) { codePage = CP_UTF32BE; } else if (memcmp(bom, "\xff\xfe", 2) == 0) { codePage = CP_UTF16LE; } else if (memcmp(bom, "\xfe\xff", 2) == 0) { codePage = CP_UTF16BE; } else { codePage = CP_ACP; } } else { codePage = CP_ACP; } return codePage; } unsigned GetBomLengthFromCodePage(UINT32 codePage) { switch (codePage) { case CP_UTF8: return 3; case CP_UTF32LE: case CP_UTF32BE: return 4; case CP_UTF16LE: case CP_UTF16BE: return 2; default: return 0; } } static unsigned CharSizeFromCodePage(UINT32 codePage) { switch (codePage) { case CP_UTF32LE: case CP_UTF32BE: return 4; case CP_UTF16LE: case CP_UTF16BE: return 2; default: return 1; } } // We do not handle translation from these code page values. static bool IsUnsupportedUtfCodePage(UINT32 codePage) { switch (codePage) { #ifdef _WIN32 case CP_UTF32LE: #endif case CP_UTF32BE: case CP_UTF16BE: return true; } return false; } unsigned GetBomLengthFromBytes(const char *bytes, size_t byteLen) throw() { return GetBomLengthFromCodePage(DxcCodePageFromBytes(bytes, byteLen)); } #define IsSizeWcharAligned(size) (((size) & (sizeof(wchar_t) - 1)) == 0) template <typename _char> bool IsUtfBufferNullTerminated(LPCVOID pBuffer, SIZE_T size) { return (size >= sizeof(_char) && (size & (sizeof(_char) - 1)) == 0 && reinterpret_cast<const _char *>( pBuffer)[(size / sizeof(_char)) - 1] == 0); } static bool IsBufferNullTerminated(LPCVOID pBuffer, SIZE_T size, UINT32 codePage) { switch (codePage) { case DXC_CP_UTF8: return IsUtfBufferNullTerminated<char>(pBuffer, size); case DXC_CP_WIDE: return IsUtfBufferNullTerminated<wchar_t>(pBuffer, size); default: return false; } } template <typename _char> bool IsUtfBufferEmptyString(LPCVOID pBuffer, SIZE_T size) { return (size == 0 || (size == sizeof(_char) && reinterpret_cast<const _char *>(pBuffer)[0] == 0)); } static bool IsBufferEmptyString(LPCVOID pBuffer, SIZE_T size, UINT32 codePage) { switch (codePage) { case DXC_CP_UTF8: return IsUtfBufferEmptyString<char>(pBuffer, size); case DXC_CP_WIDE: return IsUtfBufferEmptyString<wchar_t>(pBuffer, size); default: return IsUtfBufferEmptyString<char>(pBuffer, size); } } class DxcBlobNoEncoding_Impl : public IDxcBlobEncoding { public: typedef IDxcBlobEncoding Base; static const UINT32 CodePage = CP_ACP; }; class DxcBlobWide_Impl : public IDxcBlobWide { public: static const UINT32 CodePage = DXC_CP_WIDE; typedef IDxcBlobWide Base; virtual LPCWSTR STDMETHODCALLTYPE GetStringPointer(void) override { if (GetBufferSize() < sizeof(wchar_t)) { return L""; // Special case for empty string blob } DXASSERT(IsSizeWcharAligned(GetBufferSize()), "otherwise, buffer size is not even multiple of wchar_t"); DXASSERT(*(const wchar_t *)((const BYTE *)GetBufferPointer() + 
GetBufferSize() - sizeof(wchar_t)) == L'\0', "otherwise buffer is not null terminated."); return (LPCWSTR)GetBufferPointer(); } virtual SIZE_T STDMETHODCALLTYPE GetStringLength(void) override { SIZE_T bufSize = GetBufferSize(); return bufSize ? (bufSize / sizeof(wchar_t)) - 1 : 0; } }; class DxcBlobUtf8_Impl : public IDxcBlobUtf8 { public: static const UINT32 CodePage = CP_UTF8; typedef IDxcBlobUtf8 Base; virtual LPCSTR STDMETHODCALLTYPE GetStringPointer(void) override { if (GetBufferSize() < sizeof(char)) { return ""; // Special case for empty string blob } DXASSERT(*((const char *)GetBufferPointer() + GetBufferSize() - 1) == '\0', "otherwise buffer is not null terminated."); return (LPCSTR)GetBufferPointer(); } virtual SIZE_T STDMETHODCALLTYPE GetStringLength(void) override { SIZE_T bufSize = GetBufferSize(); return bufSize ? (bufSize / sizeof(char)) - 1 : 0; } }; template <typename _T> class InternalDxcBlobEncoding_Impl : public _T { private: DXC_MICROCOM_TM_REF_FIELDS() // an underlying m_pMalloc that owns this LPCVOID m_Buffer = nullptr; IUnknown *m_Owner = nullptr; // IMalloc when MallocFree is true, owning the buffer SIZE_T m_BufferSize; unsigned m_EncodingKnown : 1; unsigned m_MallocFree : 1; UINT32 m_CodePage; public: DXC_MICROCOM_ADDREF_IMPL(m_dwRef) ULONG STDMETHODCALLTYPE Release() override { // Because blobs are also used by tests and utilities, we avoid using TLS. ULONG result = (ULONG)--m_dwRef; if (result == 0) { CComPtr<IMalloc> pTmp(m_pMalloc); this->InternalDxcBlobEncoding_Impl::~InternalDxcBlobEncoding_Impl(); pTmp->Free(this); } return result; } DXC_MICROCOM_TM_CTOR(InternalDxcBlobEncoding_Impl) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IDxcBlob, IDxcBlobEncoding, typename _T::Base>( this, iid, ppvObject); } ~InternalDxcBlobEncoding_Impl() { if (m_MallocFree) { ((IMalloc *)m_Owner)->Free(const_cast<void *>(m_Buffer)); } if (m_Owner != nullptr) { m_Owner->Release(); } } static HRESULT CreateFromBlob(IDxcBlob *pBlob, IMalloc *pMalloc, bool encodingKnown, UINT32 codePage, InternalDxcBlobEncoding_Impl **pEncoding) { *pEncoding = InternalDxcBlobEncoding_Impl::Alloc(pMalloc); if (*pEncoding == nullptr) { return E_OUTOFMEMORY; } DXASSERT(_T::CodePage == CP_ACP || (encodingKnown && _T::CodePage == codePage), "encoding must match type"); pBlob->AddRef(); (*pEncoding)->m_Owner = pBlob; (*pEncoding)->m_Buffer = pBlob->GetBufferPointer(); (*pEncoding)->m_BufferSize = pBlob->GetBufferSize(); (*pEncoding)->m_EncodingKnown = encodingKnown; (*pEncoding)->m_MallocFree = 0; (*pEncoding)->m_CodePage = codePage; (*pEncoding)->AddRef(); return S_OK; } static HRESULT CreateFromMalloc(LPCVOID buffer, IMalloc *pIMalloc, SIZE_T bufferSize, bool encodingKnown, UINT32 codePage, InternalDxcBlobEncoding_Impl **pEncoding) { *pEncoding = InternalDxcBlobEncoding_Impl::Alloc(pIMalloc); if (*pEncoding == nullptr) { *pEncoding = nullptr; return E_OUTOFMEMORY; } DXASSERT(_T::CodePage == CP_ACP || (encodingKnown && _T::CodePage == codePage), "encoding must match type"); DXASSERT(buffer || bufferSize == 0, "otherwise, nullptr with non-zero size provided"); pIMalloc->AddRef(); (*pEncoding)->m_Owner = pIMalloc; (*pEncoding)->m_Buffer = buffer; (*pEncoding)->m_BufferSize = bufferSize; (*pEncoding)->m_EncodingKnown = encodingKnown; (*pEncoding)->m_MallocFree = buffer != nullptr; (*pEncoding)->m_CodePage = codePage; (*pEncoding)->AddRef(); return S_OK; } void AdjustPtrAndSize(unsigned offset, unsigned size) { DXASSERT(offset < m_BufferSize, 
"else caller will overflow"); DXASSERT(offset + size <= m_BufferSize, "else caller will overflow"); m_Buffer = (const uint8_t *)m_Buffer + offset; m_BufferSize = size; } virtual LPVOID STDMETHODCALLTYPE GetBufferPointer(void) override { return const_cast<LPVOID>(m_Buffer); } virtual SIZE_T STDMETHODCALLTYPE GetBufferSize(void) override { return m_BufferSize; } virtual HRESULT STDMETHODCALLTYPE GetEncoding(BOOL *pKnown, UINT32 *pCodePage) override { *pKnown = m_EncodingKnown ? TRUE : FALSE; *pCodePage = m_CodePage; return S_OK; } // Relatively dangerous API. This means the buffer should be pinned for as // long as this object is alive. void ClearFreeFlag() { m_MallocFree = 0; } }; typedef InternalDxcBlobEncoding_Impl<DxcBlobNoEncoding_Impl> InternalDxcBlobEncoding; typedef InternalDxcBlobEncoding_Impl<DxcBlobWide_Impl> InternalDxcBlobWide; typedef InternalDxcBlobEncoding_Impl<DxcBlobUtf8_Impl> InternalDxcBlobUtf8; static HRESULT CodePageBufferToWide(UINT32 codePage, LPCVOID bufferPointer, SIZE_T bufferSize, CDxcMallocHeapPtr<WCHAR> &wideNewCopy, UINT32 *pConvertedCharCount) { *pConvertedCharCount = 0; // If the buffer is empty, don't dereference bufferPointer at all. // Keep the null terminator post-condition. if (IsBufferEmptyString(bufferPointer, bufferSize, codePage)) { if (!wideNewCopy.Allocate(1)) return E_OUTOFMEMORY; wideNewCopy.m_pData[0] = L'\0'; DXASSERT(*pConvertedCharCount == 0, "else didn't init properly"); return S_OK; } if (IsUnsupportedUtfCodePage(codePage)) return DXC_E_STRING_ENCODING_FAILED; // Calculate the length of the buffer in wchar_t elements. int numToConvertWide = MultiByteToWideChar(codePage, MB_ERR_INVALID_CHARS, (LPCSTR)bufferPointer, bufferSize, nullptr, 0); if (numToConvertWide == 0) return HRESULT_FROM_WIN32(GetLastError()); // Add an extra character in case we need it for null-termination unsigned buffSizeWide; IFR(Int32ToUInt32(numToConvertWide, &buffSizeWide)); IFR(UInt32Add(buffSizeWide, 1, &buffSizeWide)); IFR(UInt32Mult(buffSizeWide, sizeof(WCHAR), &buffSizeWide)); wideNewCopy.AllocateBytes(buffSizeWide); IFROOM(wideNewCopy.m_pData); int numActuallyConvertedWide = MultiByteToWideChar(codePage, MB_ERR_INVALID_CHARS, (LPCSTR)bufferPointer, bufferSize, wideNewCopy, buffSizeWide); if (numActuallyConvertedWide == 0) return HRESULT_FROM_WIN32(GetLastError()); if (numActuallyConvertedWide < 0) return E_OUTOFMEMORY; // If all we have is null terminator, return with zero count. 
if (wideNewCopy.m_pData[0] == L'\0') { DXASSERT(*pConvertedCharCount == 0, "else didn't init properly"); return S_OK; } if ((UINT32)numActuallyConvertedWide < (buffSizeWide / sizeof(wchar_t)) && wideNewCopy.m_pData[numActuallyConvertedWide - 1] != L'\0') { wideNewCopy.m_pData[numActuallyConvertedWide++] = L'\0'; } *pConvertedCharCount = (UINT32)numActuallyConvertedWide; return S_OK; } static HRESULT CodePageBufferToUtf8(UINT32 codePage, LPCVOID bufferPointer, SIZE_T bufferSize, IMalloc *pMalloc, CDxcMallocHeapPtr<char> &utf8NewCopy, UINT32 *pConvertedCharCount) { *pConvertedCharCount = 0; CDxcMallocHeapPtr<WCHAR> wideNewCopy(pMalloc); UINT32 wideCharCount = 0; const WCHAR *wideChars = nullptr; if (codePage == DXC_CP_WIDE) { if (!IsSizeWcharAligned(bufferSize)) throw hlsl::Exception(DXC_E_STRING_ENCODING_FAILED, "Error in encoding argument specified"); wideChars = (const WCHAR *)bufferPointer; wideCharCount = bufferSize / sizeof(wchar_t); } else if (bufferSize) { IFR(CodePageBufferToWide(codePage, bufferPointer, bufferSize, wideNewCopy, &wideCharCount)); wideChars = wideNewCopy.m_pData; } // If the buffer is empty, don't dereference bufferPointer at all. // Keep the null terminator post-condition. if (IsUtfBufferEmptyString<wchar_t>(wideChars, wideCharCount)) { if (!utf8NewCopy.Allocate(1)) return E_OUTOFMEMORY; DXASSERT(*pConvertedCharCount == 0, "else didn't init properly"); utf8NewCopy.m_pData[0] = '\0'; return S_OK; } int numToConvertUtf8 = WideCharToMultiByte( CP_UTF8, 0, wideChars, wideCharCount, NULL, 0, NULL, NULL); if (numToConvertUtf8 == 0) return HRESULT_FROM_WIN32(GetLastError()); UINT32 buffSizeUtf8; IFR(Int32ToUInt32(numToConvertUtf8, &buffSizeUtf8)); if (!IsBufferNullTerminated(wideChars, wideCharCount * sizeof(wchar_t), DXC_CP_WIDE)) { // If original size doesn't include null-terminator, // we have to add one to the converted buffer. 
IFR(UInt32Add(buffSizeUtf8, 1, &buffSizeUtf8)); } utf8NewCopy.AllocateBytes(buffSizeUtf8); IFROOM(utf8NewCopy.m_pData); int numActuallyConvertedUtf8 = WideCharToMultiByte(CP_UTF8, 0, wideChars, wideCharCount, utf8NewCopy, buffSizeUtf8, NULL, NULL); if (numActuallyConvertedUtf8 == 0) return HRESULT_FROM_WIN32(GetLastError()); if (numActuallyConvertedUtf8 < 0) return E_OUTOFMEMORY; if ((UINT32)numActuallyConvertedUtf8 < buffSizeUtf8 && utf8NewCopy.m_pData[numActuallyConvertedUtf8 - 1] != '\0') { utf8NewCopy.m_pData[numActuallyConvertedUtf8++] = '\0'; } *pConvertedCharCount = (UINT32)numActuallyConvertedUtf8; return S_OK; } static bool TryCreateEmptyBlobUtf(UINT32 codePage, IMalloc *pMalloc, IDxcBlobEncoding **ppBlobEncoding) { if (codePage == CP_UTF8) { InternalDxcBlobUtf8 *internalUtf8; IFR(InternalDxcBlobUtf8::CreateFromMalloc(nullptr, pMalloc, 0, true, codePage, &internalUtf8)); *ppBlobEncoding = internalUtf8; return true; } else if (codePage == DXC_CP_WIDE) { InternalDxcBlobWide *internalWide; IFR(InternalDxcBlobWide::CreateFromMalloc(nullptr, pMalloc, 0, true, codePage, &internalWide)); *ppBlobEncoding = internalWide; return true; } return false; } static bool TryCreateBlobUtfFromBlob(IDxcBlob *pFromBlob, UINT32 codePage, IMalloc *pMalloc, IDxcBlobEncoding **ppBlobEncoding) { // Try to create a IDxcBlobUtf8 or IDxcBlobWide if (IsBlobNullOrEmpty(pFromBlob)) { return TryCreateEmptyBlobUtf(codePage, pMalloc, ppBlobEncoding); } else if (IsBufferNullTerminated(pFromBlob->GetBufferPointer(), pFromBlob->GetBufferSize(), codePage)) { if (codePage == CP_UTF8) { InternalDxcBlobUtf8 *internalUtf8; IFR(InternalDxcBlobUtf8::CreateFromBlob(pFromBlob, pMalloc, true, codePage, &internalUtf8)); *ppBlobEncoding = internalUtf8; return true; } else if (codePage == DXC_CP_WIDE) { InternalDxcBlobWide *internalWide; IFR(InternalDxcBlobWide::CreateFromBlob(pFromBlob, pMalloc, true, codePage, &internalWide)); *ppBlobEncoding = internalWide; return true; } } return false; } HRESULT DxcCreateBlob(LPCVOID pPtr, SIZE_T size, bool bPinned, bool bCopy, bool encodingKnown, UINT32 codePage, IMalloc *pMalloc, IDxcBlobEncoding **ppBlobEncoding) throw() { IFRBOOL(!(bPinned && bCopy), E_INVALIDARG); IFRBOOL(ppBlobEncoding, E_INVALIDARG); *ppBlobEncoding = nullptr; bool bNullTerminated = encodingKnown ? IsBufferNullTerminated(pPtr, size, codePage) : false; unsigned bomSize = (encodingKnown && codePage != CP_ACP) ? GetBomLengthFromBytes((const char *)pPtr, size) : 0; if (bomSize) { // Adjust pPtr and size to skip BOM. // When !encodingKnown or codePage == CP_ACP, BOM will be skipped when // interpreting as text and translating to unicode, since at this point, // the buffer could be an arbitrary binary blob. // There is an odd corner case with BOM detection where an empty // non-null-terminated UTF-32 LE buffer with BOM would be interpreted // as an empty null-terminated UTF-16 LE buffer with a BOM. // This won't matter in the end, since both cases are empty buffers, and // will map to the empty buffer case with the desired codePage setting. 
pPtr = (const char *)pPtr + bomSize; size -= bomSize; } bool emptyString = !pPtr || !size; if (bNullTerminated) { DXASSERT_NOMSG(pPtr && size && encodingKnown); emptyString = size == CharSizeFromCodePage(codePage); } if (!pMalloc) pMalloc = DxcGetThreadMallocNoRef(); // Handle empty blob if (emptyString) { if (encodingKnown && TryCreateEmptyBlobUtf(codePage, pMalloc, ppBlobEncoding)) return S_OK; InternalDxcBlobEncoding *pInternalEncoding; IFR(InternalDxcBlobEncoding::CreateFromMalloc( nullptr, pMalloc, 0, encodingKnown, codePage, &pInternalEncoding)); *ppBlobEncoding = pInternalEncoding; if (pPtr && !(bCopy || bPinned)) { // Free memory as we're not taking ownership of it pMalloc->Free(const_cast<void *>(pPtr)); } return S_OK; } if (bPinned) { if (encodingKnown) { if (bNullTerminated) { if (codePage == CP_UTF8) { InternalDxcBlobUtf8 *internalUtf8; IFR(InternalDxcBlobUtf8::CreateFromMalloc(pPtr, pMalloc, size, true, codePage, &internalUtf8)); *ppBlobEncoding = internalUtf8; internalUtf8->ClearFreeFlag(); return S_OK; } else if (codePage == DXC_CP_WIDE) { InternalDxcBlobWide *internalWide; IFR(InternalDxcBlobWide::CreateFromMalloc(pPtr, pMalloc, size, true, codePage, &internalWide)); *ppBlobEncoding = internalWide; internalWide->ClearFreeFlag(); return S_OK; } } } InternalDxcBlobEncoding *internalEncoding; IFR(InternalDxcBlobEncoding::CreateFromMalloc( pPtr, pMalloc, size, encodingKnown, codePage, &internalEncoding)); *ppBlobEncoding = internalEncoding; internalEncoding->ClearFreeFlag(); return S_OK; } void *pData = const_cast<void *>(pPtr); SIZE_T newSize = size; CDxcMallocHeapPtr<char> heapCopy(pMalloc); if (bCopy) { if (encodingKnown) { if (!bNullTerminated) { if (codePage == CP_UTF8) { newSize += sizeof(char); bNullTerminated = true; } else if (codePage == DXC_CP_WIDE) { newSize += sizeof(wchar_t); bNullTerminated = true; } } } heapCopy.AllocateBytes(newSize); pData = heapCopy.m_pData; if (pData == nullptr) return E_OUTOFMEMORY; if (pPtr) memcpy(pData, pPtr, size); else memset(pData, 0, size); } if (bNullTerminated && codePage == CP_UTF8) { if (bCopy && newSize > size) ((char *)pData)[newSize - 1] = 0; InternalDxcBlobUtf8 *internalUtf8; IFR(InternalDxcBlobUtf8::CreateFromMalloc(pData, pMalloc, newSize, true, codePage, &internalUtf8)); *ppBlobEncoding = internalUtf8; } else if (bNullTerminated && codePage == DXC_CP_WIDE) { if (bCopy && newSize > size) ((wchar_t *)pData)[(newSize / sizeof(wchar_t)) - 1] = 0; InternalDxcBlobWide *internalWide; IFR(InternalDxcBlobWide::CreateFromMalloc(pData, pMalloc, newSize, true, codePage, &internalWide)); *ppBlobEncoding = internalWide; } else { InternalDxcBlobEncoding *internalEncoding; IFR(InternalDxcBlobEncoding::CreateFromMalloc( pData, pMalloc, newSize, encodingKnown, codePage, &internalEncoding)); *ppBlobEncoding = internalEncoding; } if (bCopy) heapCopy.Detach(); return S_OK; } HRESULT DxcCreateBlobEncodingFromBlob(IDxcBlob *pFromBlob, UINT32 offset, UINT32 length, bool encodingKnown, UINT32 codePage, IMalloc *pMalloc, IDxcBlobEncoding **ppBlobEncoding) throw() { IFRBOOL(pFromBlob, E_POINTER); IFRBOOL(ppBlobEncoding, E_POINTER); *ppBlobEncoding = nullptr; if (!pMalloc) pMalloc = DxcGetThreadMallocNoRef(); InternalDxcBlobEncoding *internalEncoding; if (offset || length) { UINT32 end; IFR(UInt32Add(offset, length, &end)); SIZE_T blobSize = pFromBlob->GetBufferSize(); if (end > blobSize) return E_INVALIDARG; IFR(InternalDxcBlobEncoding::CreateFromBlob( pFromBlob, pMalloc, encodingKnown, codePage, &internalEncoding)); 
internalEncoding->AdjustPtrAndSize(offset, length); *ppBlobEncoding = internalEncoding; return S_OK; } if (!encodingKnown || codePage == CP_UTF8) { IDxcBlobUtf8 *pBlobUtf8; if (SUCCEEDED(pFromBlob->QueryInterface(&pBlobUtf8))) { *ppBlobEncoding = pBlobUtf8; return S_OK; } } if (!encodingKnown || codePage == DXC_CP_WIDE) { IDxcBlobWide *pBlobWide; if (SUCCEEDED(pFromBlob->QueryInterface(&pBlobWide))) { *ppBlobEncoding = pBlobWide; return S_OK; } } CComPtr<IDxcBlobEncoding> pBlobEncoding; if (SUCCEEDED(pFromBlob->QueryInterface(&pBlobEncoding))) { BOOL thisEncodingKnown; UINT32 thisEncoding; IFR(pBlobEncoding->GetEncoding(&thisEncodingKnown, &thisEncoding)); bool encodingMatches = thisEncodingKnown && encodingKnown && codePage == thisEncoding; if (!encodingKnown && thisEncodingKnown) { codePage = thisEncoding; encodingKnown = thisEncodingKnown; encodingMatches = true; } if (encodingMatches) { if (!TryCreateBlobUtfFromBlob(pFromBlob, codePage, pMalloc, ppBlobEncoding)) { *ppBlobEncoding = pBlobEncoding.Detach(); } return S_OK; } if (encodingKnown) { if (TryCreateBlobUtfFromBlob(pFromBlob, codePage, pMalloc, ppBlobEncoding)) { return S_OK; } IFR(InternalDxcBlobEncoding::CreateFromBlob(pFromBlob, pMalloc, true, codePage, &internalEncoding)); *ppBlobEncoding = internalEncoding; return S_OK; } DXASSERT(!encodingKnown && !thisEncodingKnown, "otherwise, missing case"); *ppBlobEncoding = pBlobEncoding.Detach(); return S_OK; } if (encodingKnown && TryCreateBlobUtfFromBlob(pFromBlob, codePage, pMalloc, ppBlobEncoding)) { return S_OK; } IFR(InternalDxcBlobEncoding::CreateFromBlob(pFromBlob, pMalloc, encodingKnown, codePage, &internalEncoding)); *ppBlobEncoding = internalEncoding; return S_OK; } HRESULT DxcCreateBlobFromBlob(IDxcBlob *pBlob, UINT32 offset, UINT32 length, IDxcBlob **ppResult) throw() { IFRBOOL(ppResult, E_POINTER); *ppResult = nullptr; IDxcBlobEncoding *pResult; IFR(DxcCreateBlobEncodingFromBlob(pBlob, offset, length, false, 0, DxcGetThreadMallocNoRef(), &pResult)); *ppResult = pResult; return S_OK; } HRESULT DxcCreateBlobOnMalloc(LPCVOID pData, IMalloc *pIMalloc, UINT32 size, IDxcBlob **ppResult) throw() { IFRBOOL(ppResult, E_POINTER); *ppResult = nullptr; IDxcBlobEncoding *pResult; IFR(DxcCreateBlob(pData, size, false, false, false, 0, pIMalloc, &pResult)); *ppResult = pResult; return S_OK; } HRESULT DxcCreateBlobOnHeapCopy(LPCVOID pData, UINT32 size, IDxcBlob **ppResult) throw() { IFRBOOL(ppResult, E_POINTER); *ppResult = nullptr; IDxcBlobEncoding *pResult; IFR(DxcCreateBlob(pData, size, false, true, false, 0, DxcGetThreadMallocNoRef(), &pResult)); *ppResult = pResult; return S_OK; } HRESULT DxcCreateBlobFromFile(IMalloc *pMalloc, LPCWSTR pFileName, UINT32 *pCodePage, IDxcBlobEncoding **ppBlobEncoding) throw() { if (pFileName == nullptr || ppBlobEncoding == nullptr) { return E_POINTER; } LPVOID pData; DWORD dataSize; *ppBlobEncoding = nullptr; HRESULT hr = ReadBinaryFile(pMalloc, pFileName, &pData, &dataSize); if (FAILED(hr)) return hr; bool known = (pCodePage != nullptr); UINT32 codePage = (pCodePage != nullptr) ? 
*pCodePage : 0; hr = DxcCreateBlob(pData, dataSize, false, false, known, codePage, pMalloc, ppBlobEncoding); if (FAILED(hr)) pMalloc->Free(pData); return hr; } HRESULT DxcCreateBlobFromFile(LPCWSTR pFileName, UINT32 *pCodePage, IDxcBlobEncoding **ppBlobEncoding) throw() { return DxcCreateBlobFromFile(DxcGetThreadMallocNoRef(), pFileName, pCodePage, ppBlobEncoding); } HRESULT DxcCreateBlobWithEncodingSet(IMalloc *pMalloc, IDxcBlob *pBlob, UINT32 codePage, IDxcBlobEncoding **ppBlobEncoding) throw() { return DxcCreateBlobEncodingFromBlob(pBlob, 0, 0, true, codePage, pMalloc, ppBlobEncoding); } HRESULT DxcCreateBlobWithEncodingSet(IDxcBlob *pBlob, UINT32 codePage, IDxcBlobEncoding **ppBlobEncoding) throw() { return DxcCreateBlobEncodingFromBlob(pBlob, 0, 0, true, codePage, nullptr, ppBlobEncoding); } HRESULT DxcCreateBlobWithEncodingFromPinned(LPCVOID pText, UINT32 size, UINT32 codePage, IDxcBlobEncoding **pBlobEncoding) throw() { return DxcCreateBlob(pText, size, true, false, true, codePage, nullptr, pBlobEncoding); } HRESULT DxcCreateBlobFromPinned(LPCVOID pText, UINT32 size, IDxcBlob **pBlob) throw() { CComPtr<IDxcBlobEncoding> pBlobEncoding; DxcCreateBlob(pText, size, true, false, false, CP_ACP, nullptr, &pBlobEncoding); return pBlobEncoding.QueryInterface(pBlob); } HRESULT DxcCreateBlobWithEncodingFromStream(IStream *pStream, bool newInstanceAlways, UINT32 codePage, IDxcBlobEncoding **ppBlobEncoding) throw() { IFRBOOL(ppBlobEncoding, E_POINTER); *ppBlobEncoding = nullptr; if (pStream == nullptr) { return S_OK; } // Try to reuse the existing stream. if (!newInstanceAlways) { CComPtr<IDxcBlobEncoding> blobEncoding; if (SUCCEEDED(pStream->QueryInterface(&blobEncoding))) { *ppBlobEncoding = blobEncoding.Detach(); return S_OK; } } // Layer over the blob if possible. CComPtr<IDxcBlob> blob; if (SUCCEEDED(pStream->QueryInterface(&blob))) { return DxcCreateBlobWithEncodingSet(blob, codePage, ppBlobEncoding); } // Create a copy of contents, last resort. // TODO: implement when we find this codepath internally return E_NOTIMPL; } HRESULT DxcCreateBlobWithEncodingOnHeapCopy(LPCVOID pText, UINT32 size, UINT32 codePage, IDxcBlobEncoding **pBlobEncoding) throw() { return DxcCreateBlob(pText, size, false, true, true, codePage, nullptr, pBlobEncoding); } HRESULT DxcCreateBlobWithEncodingOnMalloc(LPCVOID pText, IMalloc *pIMalloc, UINT32 size, UINT32 codePage, IDxcBlobEncoding **pBlobEncoding) throw() { return DxcCreateBlob(pText, size, false, false, true, codePage, pIMalloc, pBlobEncoding); } HRESULT DxcCreateBlobWithEncodingOnMallocCopy( IMalloc *pIMalloc, LPCVOID pText, UINT32 size, UINT32 codePage, IDxcBlobEncoding **ppBlobEncoding) throw() { return DxcCreateBlob(pText, size, false, true, true, codePage, pIMalloc, ppBlobEncoding); } HRESULT DxcGetBlobAsUtf8(IDxcBlob *pBlob, IMalloc *pMalloc, IDxcBlobUtf8 **pBlobEncoding, UINT32 defaultCodePage) throw() { IFRBOOL(pBlob, E_POINTER); IFRBOOL(pBlobEncoding, E_POINTER); *pBlobEncoding = nullptr; if (SUCCEEDED(pBlob->QueryInterface(pBlobEncoding))) return S_OK; HRESULT hr; CComPtr<IDxcBlobEncoding> pSourceBlob; UINT32 codePage = CP_ACP; BOOL known = FALSE; if (SUCCEEDED(pBlob->QueryInterface(&pSourceBlob))) { if (FAILED(hr = pSourceBlob->GetEncoding(&known, &codePage))) return hr; } const char *bufferPointer = (const char *)pBlob->GetBufferPointer(); SIZE_T blobLen = pBlob->GetBufferSize(); unsigned bomSize = 0; if (!known || codePage == CP_ACP) { // Try to determine encoding from BOM. // If encoding was known, any BOM should have been stripped already. 
codePage = DxcCodePageFromBytes(bufferPointer, blobLen); bomSize = GetBomLengthFromCodePage(codePage); // BOM exists, adjust pointer and size to strip. bufferPointer += bomSize; blobLen -= bomSize; // If no BOM, use encoding option if specified if (codePage == CP_ACP && defaultCodePage != CP_ACP) { codePage = defaultCodePage; } } if (!pMalloc) pMalloc = DxcGetThreadMallocNoRef(); CDxcMallocHeapPtr<char> utf8NewCopy(pMalloc); UINT32 utf8CharCount = 0; // Reuse or copy the underlying blob depending on null-termination if (codePage == CP_UTF8) { utf8CharCount = blobLen; if (IsBufferNullTerminated(bufferPointer, blobLen, CP_UTF8)) { // Already null-terminated, reference other blob's memory InternalDxcBlobUtf8 *internalEncoding; hr = InternalDxcBlobUtf8::CreateFromBlob(pBlob, pMalloc, true, CP_UTF8, &internalEncoding); if (SUCCEEDED(hr)) { // Adjust if buffer has BOM; blobLen is already adjusted. if (bomSize) internalEncoding->AdjustPtrAndSize(bomSize, blobLen); *pBlobEncoding = internalEncoding; } return hr; } else if (utf8CharCount > 0) { // Copy to new buffer and null-terminate if (!utf8NewCopy.Allocate(utf8CharCount + 1)) return E_OUTOFMEMORY; memcpy(utf8NewCopy.m_pData, bufferPointer, utf8CharCount); utf8NewCopy.m_pData[utf8CharCount++] = 0; } } else { // Convert and create a blob that owns the encoding. if (FAILED(hr = CodePageBufferToUtf8(codePage, bufferPointer, blobLen, pMalloc, utf8NewCopy, &utf8CharCount))) { return hr; } DXASSERT( !utf8CharCount || IsBufferNullTerminated(utf8NewCopy.m_pData, utf8CharCount, CP_UTF8), "otherwise, CodePageBufferToUtf8 failed to null-terminate buffer."); } // At this point, we have new utf8NewCopy to wrap in a blob InternalDxcBlobUtf8 *internalEncoding; hr = InternalDxcBlobUtf8::CreateFromMalloc(utf8NewCopy.m_pData, pMalloc, utf8CharCount, true, CP_UTF8, &internalEncoding); if (SUCCEEDED(hr)) { *pBlobEncoding = internalEncoding; utf8NewCopy.Detach(); } return hr; } // This is kept for compatibility. HRESULT DxcGetBlobAsUtf8NullTerm(IDxcBlob *pBlob, IDxcBlobEncoding **ppBlobEncoding) throw() { IFRBOOL(pBlob, E_POINTER); IFRBOOL(ppBlobEncoding, E_POINTER); *ppBlobEncoding = nullptr; CComPtr<IDxcBlobUtf8> pConverted; IFR(DxcGetBlobAsUtf8(pBlob, DxcGetThreadMallocNoRef(), &pConverted)); pConverted->QueryInterface(ppBlobEncoding); return S_OK; } HRESULT DxcGetBlobAsWide(IDxcBlob *pBlob, IMalloc *pMalloc, IDxcBlobWide **pBlobEncoding) throw() { IFRBOOL(pBlob, E_POINTER); IFRBOOL(pBlobEncoding, E_POINTER); *pBlobEncoding = nullptr; if (SUCCEEDED(pBlob->QueryInterface(pBlobEncoding))) return S_OK; HRESULT hr; CComPtr<IDxcBlobEncoding> pSourceBlob; UINT32 codePage = CP_ACP; BOOL known = FALSE; if (SUCCEEDED(pBlob->QueryInterface(&pSourceBlob))) { if (FAILED(hr = pSourceBlob->GetEncoding(&known, &codePage))) return hr; } // Look for BOM and adjust pointer and size to skip if necessary. const char *bufferPointer = (const char *)pBlob->GetBufferPointer(); SIZE_T blobLen = pBlob->GetBufferSize(); unsigned bomSize = 0; if (!known || codePage == CP_ACP) { // Try to determine encoding from BOM. // If encoding was known, any BOM should have been stripped already. codePage = DxcCodePageFromBytes(bufferPointer, blobLen); bomSize = GetBomLengthFromCodePage(codePage); // BOM exists, adjust pointer and size to strip. 
bufferPointer += bomSize; blobLen -= bomSize; } if (!pMalloc) pMalloc = DxcGetThreadMallocNoRef(); CDxcMallocHeapPtr<WCHAR> wideNewCopy(pMalloc); UINT32 wideCharCount = 0; // Reuse or copy the underlying blob depending on null-termination if (codePage == DXC_CP_WIDE) { if (!IsSizeWcharAligned(blobLen)) return DXC_E_STRING_ENCODING_FAILED; wideCharCount = blobLen / sizeof(wchar_t); if (IsBufferNullTerminated(bufferPointer, blobLen, DXC_CP_WIDE)) { // Already null-terminated, reference other blob's memory InternalDxcBlobWide *internalEncoding; hr = InternalDxcBlobWide::CreateFromBlob(pBlob, pMalloc, true, DXC_CP_WIDE, &internalEncoding); if (SUCCEEDED(hr)) { // Adjust if buffer has BOM; blobLen is already adjusted. if (bomSize) internalEncoding->AdjustPtrAndSize(bomSize, blobLen); *pBlobEncoding = internalEncoding; } return hr; } else { // Copy to new buffer and null-terminate if (!wideNewCopy.Allocate(wideCharCount + 1)) return E_OUTOFMEMORY; memcpy(wideNewCopy.m_pData, bufferPointer, blobLen); wideNewCopy.m_pData[wideCharCount++] = 0; } } else { // Convert and create a blob that owns the encoding. if (FAILED(hr = CodePageBufferToWide(codePage, bufferPointer, blobLen, wideNewCopy, &wideCharCount))) { return hr; } } // At this point, we have new wideNewCopy to wrap in a blob DXASSERT(!wideCharCount || IsBufferNullTerminated( wideNewCopy.m_pData, wideCharCount * sizeof(wchar_t), DXC_CP_WIDE), "otherwise, failed to null-terminate buffer."); InternalDxcBlobWide *internalEncoding; hr = InternalDxcBlobWide::CreateFromMalloc( wideNewCopy.m_pData, pMalloc, wideCharCount * sizeof(WCHAR), true, DXC_CP_WIDE, &internalEncoding); if (SUCCEEDED(hr)) { *pBlobEncoding = internalEncoding; wideNewCopy.Detach(); } return hr; } bool IsBlobNullOrEmpty(IDxcBlob *pBlob) throw() { return pBlob == nullptr || pBlob->GetBufferSize() == 0; } /////////////////////////////////////////////////////////////////////////////// // Stream implementations. class MemoryStream : public AbstractMemoryStream, public IDxcBlob { private: DXC_MICROCOM_TM_REF_FIELDS() LPBYTE m_pMemory = nullptr; ULONG m_offset = 0; ULONG m_size = 0; ULONG m_allocSize = 0; public: DXC_MICROCOM_ADDREF_IMPL(m_dwRef) ULONG STDMETHODCALLTYPE Release() override { // Because memory streams are also used by tests and utilities, // we avoid using TLS. ULONG result = (ULONG)--m_dwRef; if (result == 0) { CComPtr<IMalloc> pTmp(m_pMalloc); this->MemoryStream::~MemoryStream(); pTmp->Free(this); } return result; } DXC_MICROCOM_TM_CTOR(MemoryStream) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IStream, ISequentialStream, IDxcBlob>( this, iid, ppvObject); } ~MemoryStream() { Reset(); } HRESULT Grow(ULONG targetSize) { if (targetSize < m_allocSize * 2) { targetSize = m_allocSize * 2; } return Reserve(targetSize); } void Reset() { if (m_pMemory != nullptr) { m_pMalloc->Free(m_pMemory); } m_pMemory = nullptr; m_offset = 0; m_size = 0; m_allocSize = 0; } // AbstractMemoryStream implementation. 
LPBYTE GetPtr() throw() override { return m_pMemory; } ULONG GetPtrSize() throw() override { return m_size; } LPBYTE Detach() throw() override { LPBYTE result = m_pMemory; m_pMemory = nullptr; Reset(); return result; } UINT64 GetPosition() throw() override { return m_offset; } HRESULT Reserve(ULONG targetSize) throw() override { if (m_pMemory == nullptr) { m_pMemory = (LPBYTE)m_pMalloc->Alloc(targetSize); if (m_pMemory == nullptr) { return E_OUTOFMEMORY; } } else { void *newPtr = m_pMalloc->Realloc(m_pMemory, targetSize); if (newPtr == nullptr) { return E_OUTOFMEMORY; } m_pMemory = (LPBYTE)newPtr; } m_allocSize = targetSize; return S_OK; } // IDxcBlob implementation. Requires no further writes. LPVOID STDMETHODCALLTYPE GetBufferPointer(void) override { return m_pMemory; } SIZE_T STDMETHODCALLTYPE GetBufferSize(void) override { return m_size; } // ISequentialStream implementation. HRESULT STDMETHODCALLTYPE Read(void *pv, ULONG cb, ULONG *pcbRead) override { if (!pv || !pcbRead) return E_POINTER; // If we seeked past the end, read nothing. if (m_offset > m_size) { *pcbRead = 0; return S_FALSE; } ULONG cbLeft = m_size - m_offset; *pcbRead = std::min(cb, cbLeft); memcpy(pv, m_pMemory + m_offset, *pcbRead); m_offset += *pcbRead; return (*pcbRead == cb) ? S_OK : S_FALSE; } HRESULT STDMETHODCALLTYPE Write(void const *pv, ULONG cb, ULONG *pcbWritten) override { if (!pv || !pcbWritten) return E_POINTER; if (cb + m_offset > m_allocSize) { HRESULT hr = Grow(cb + m_offset); if (FAILED(hr)) return hr; // Implicitly extend as needed with zeroes. if (m_offset > m_size) { memset(m_pMemory + m_size, 0, m_offset - m_size); } } *pcbWritten = cb; memcpy(m_pMemory + m_offset, pv, cb); m_offset += cb; m_size = std::max(m_size, m_offset); return S_OK; } // IStream implementation. 
HRESULT STDMETHODCALLTYPE SetSize(ULARGE_INTEGER val) override { if (val.u.HighPart != 0) { return E_OUTOFMEMORY; } if (val.u.LowPart > m_allocSize) { return Grow(m_allocSize); } if (val.u.LowPart < m_size) { m_size = val.u.LowPart; m_offset = std::min(m_offset, m_size); } else if (val.u.LowPart > m_size) { memset(m_pMemory + m_size, 0, val.u.LowPart - m_size); m_size = val.u.LowPart; } return S_OK; } HRESULT STDMETHODCALLTYPE CopyTo(IStream *, ULARGE_INTEGER, ULARGE_INTEGER *, ULARGE_INTEGER *) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Commit(DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Revert(void) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Clone(IStream **) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Seek(LARGE_INTEGER liDistanceToMove, DWORD dwOrigin, ULARGE_INTEGER *lpNewFilePointer) override { if (lpNewFilePointer != nullptr) { lpNewFilePointer->QuadPart = 0; } if (liDistanceToMove.u.HighPart != 0) { return E_FAIL; } ULONG targetOffset; switch (dwOrigin) { case STREAM_SEEK_SET: targetOffset = liDistanceToMove.u.LowPart; break; case STREAM_SEEK_CUR: targetOffset = liDistanceToMove.u.LowPart + m_offset; break; case STREAM_SEEK_END: targetOffset = liDistanceToMove.u.LowPart + m_size; break; default: return STG_E_INVALIDFUNCTION; } m_offset = targetOffset; if (lpNewFilePointer != nullptr) { lpNewFilePointer->u.LowPart = targetOffset; } return S_OK; } HRESULT STDMETHODCALLTYPE Stat(STATSTG *pStatstg, DWORD grfStatFlag) override { if (pStatstg == nullptr) { return E_POINTER; } ZeroMemory(pStatstg, sizeof(*pStatstg)); pStatstg->type = STGTY_STREAM; pStatstg->cbSize.u.LowPart = m_size; return S_OK; } }; class ReadOnlyBlobStream : public IStream { private: DXC_MICROCOM_TM_REF_FIELDS() CComPtr<IDxcBlob> m_pSource; LPBYTE m_pMemory; ULONG m_offset; ULONG m_size; public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(ReadOnlyBlobStream) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IStream, ISequentialStream>(this, iid, ppvObject); } void Init(IDxcBlob *pSource) { m_pSource = pSource; m_offset = 0; m_size = m_pSource->GetBufferSize(); m_pMemory = (LPBYTE)m_pSource->GetBufferPointer(); // If Utf8Blob, exclude terminating null character CComPtr<IDxcBlobUtf8> utf8Source; if (m_size && SUCCEEDED(pSource->QueryInterface(&utf8Source))) m_size = utf8Source->GetStringLength(); } // ISequentialStream implementation. HRESULT STDMETHODCALLTYPE Read(void *pv, ULONG cb, ULONG *pcbRead) override { if (!pv || !pcbRead) return E_POINTER; ULONG cbLeft = m_size - m_offset; *pcbRead = std::min(cb, cbLeft); memcpy(pv, m_pMemory + m_offset, *pcbRead); m_offset += *pcbRead; return (*pcbRead == cb) ? S_OK : S_FALSE; } HRESULT STDMETHODCALLTYPE Write(void const *, ULONG, ULONG *) override { return STG_E_ACCESSDENIED; } // IStream implementation. 
HRESULT STDMETHODCALLTYPE SetSize(ULARGE_INTEGER val) override { return STG_E_ACCESSDENIED; } HRESULT STDMETHODCALLTYPE CopyTo(IStream *, ULARGE_INTEGER, ULARGE_INTEGER *, ULARGE_INTEGER *) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Commit(DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Revert(void) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Clone(IStream **) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Seek(LARGE_INTEGER liDistanceToMove, DWORD dwOrigin, ULARGE_INTEGER *lpNewFilePointer) override { if (lpNewFilePointer != nullptr) { lpNewFilePointer->QuadPart = 0; } if (liDistanceToMove.u.HighPart != 0) { return E_FAIL; } ULONG targetOffset; switch (dwOrigin) { case STREAM_SEEK_SET: targetOffset = liDistanceToMove.u.LowPart; break; case STREAM_SEEK_CUR: targetOffset = liDistanceToMove.u.LowPart + m_offset; break; case STREAM_SEEK_END: targetOffset = liDistanceToMove.u.LowPart + m_size; break; default: return STG_E_INVALIDFUNCTION; } // Do not implicility extend. if (targetOffset > m_size) { return E_FAIL; } m_offset = targetOffset; if (lpNewFilePointer != nullptr) { lpNewFilePointer->u.LowPart = targetOffset; } return S_OK; } HRESULT STDMETHODCALLTYPE Stat(STATSTG *pStatstg, DWORD grfStatFlag) override { if (pStatstg == nullptr) { return E_POINTER; } ZeroMemory(pStatstg, sizeof(*pStatstg)); pStatstg->type = STGTY_STREAM; pStatstg->cbSize.u.LowPart = m_size; return S_OK; } }; class FixedSizeMemoryStream : public AbstractMemoryStream { private: DXC_MICROCOM_TM_REF_FIELDS() LPBYTE m_pBuffer; ULONG m_offset; ULONG m_size; public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(FixedSizeMemoryStream) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IStream, ISequentialStream>(this, iid, ppvObject); } void Init(LPBYTE pBuffer, size_t size) { m_pBuffer = pBuffer; m_offset = 0; m_size = size; } // ISequentialStream implementation. HRESULT STDMETHODCALLTYPE Read(void *pv, ULONG cb, ULONG *pcbRead) override { if (!pv || !pcbRead) return E_POINTER; ULONG cbLeft = m_size - m_offset; *pcbRead = std::min(cb, cbLeft); memcpy(pv, m_pBuffer + m_offset, *pcbRead); m_offset += *pcbRead; return (*pcbRead == cb) ? S_OK : S_FALSE; } HRESULT STDMETHODCALLTYPE Write(void const *pv, ULONG cb, ULONG *pcbWritten) override { if (!pv || !pcbWritten) return E_POINTER; ULONG cbLeft = m_size - m_offset; *pcbWritten = std::min(cb, cbLeft); memcpy(m_pBuffer + m_offset, pv, *pcbWritten); m_offset += *pcbWritten; return (*pcbWritten == cb) ? S_OK : S_FALSE; } // IStream implementation. 
HRESULT STDMETHODCALLTYPE SetSize(ULARGE_INTEGER val) override { return STG_E_ACCESSDENIED; } HRESULT STDMETHODCALLTYPE CopyTo(IStream *, ULARGE_INTEGER, ULARGE_INTEGER *, ULARGE_INTEGER *) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Commit(DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Revert(void) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Clone(IStream **) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Seek(LARGE_INTEGER, DWORD, ULARGE_INTEGER *) override { return E_NOTIMPL; } HRESULT STDMETHODCALLTYPE Stat(STATSTG *pStatstg, DWORD grfStatFlag) override { if (pStatstg == nullptr) { return E_POINTER; } ZeroMemory(pStatstg, sizeof(*pStatstg)); pStatstg->type = STGTY_STREAM; pStatstg->cbSize.u.LowPart = m_size; return S_OK; } // AbstractMemoryStream implementation LPBYTE GetPtr() throw() override { return m_pBuffer; } ULONG GetPtrSize() throw() override { return m_size; } LPBYTE Detach() throw() override { LPBYTE result = m_pBuffer; m_pBuffer = nullptr; m_size = 0; m_offset = 0; return result; } UINT64 GetPosition() throw() override { return m_offset; } HRESULT Reserve(ULONG targetSize) throw() override { return targetSize <= m_size ? S_OK : E_BOUNDS; } }; HRESULT CreateMemoryStream(IMalloc *pMalloc, AbstractMemoryStream **ppResult) throw() { if (pMalloc == nullptr || ppResult == nullptr) { return E_POINTER; } CComPtr<MemoryStream> stream = MemoryStream::Alloc(pMalloc); *ppResult = stream.Detach(); return (*ppResult == nullptr) ? E_OUTOFMEMORY : S_OK; } HRESULT CreateReadOnlyBlobStream(IDxcBlob *pSource, IStream **ppResult) throw() { if (pSource == nullptr || ppResult == nullptr) { return E_POINTER; } CComPtr<ReadOnlyBlobStream> stream = ReadOnlyBlobStream::Alloc(DxcGetThreadMallocNoRef()); if (stream.p) { stream->Init(pSource); } *ppResult = stream.Detach(); return (*ppResult == nullptr) ? E_OUTOFMEMORY : S_OK; } HRESULT CreateFixedSizeMemoryStream(LPBYTE pBuffer, size_t size, AbstractMemoryStream **ppResult) throw() { if (pBuffer == nullptr || ppResult == nullptr) { return E_POINTER; } CComPtr<FixedSizeMemoryStream> stream = FixedSizeMemoryStream::Alloc(DxcGetThreadMallocNoRef()); if (stream.p) { stream->Init(pBuffer, size); } *ppResult = stream.Detach(); return (*ppResult == nullptr) ? E_OUTOFMEMORY : S_OK; } } // namespace hlsl
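Usage sketch for the blob and stream helpers defined above. This is illustrative only and not part of the repository file; it assumes the declarations live in the DxcSupport headers (the include paths below are assumptions), that the caller has set up the thread allocator the way the rest of DxcSupport expects, and RoundTripExample plus the shader text are placeholder names.

#include "dxc/Support/Global.h"        // assumed location of IFR and malloc helpers
#include "dxc/Support/FileIOHelper.h"  // assumed location of the declarations above

// Create a UTF-8 blob from an in-memory string, view it through the
// null-terminated IDxcBlobUtf8 interface, then read it back via IStream.
static HRESULT RoundTripExample() {
  const char text[] = "float4 main() : SV_Target { return (float4)0; }";

  // bCopy path: the helper copies the buffer and null-terminates the copy.
  CComPtr<IDxcBlobEncoding> pBlob;
  IFR(hlsl::DxcCreateBlobWithEncodingOnHeapCopy(text, sizeof(text) - 1,
                                                CP_UTF8, &pBlob));

  // Reuses the existing buffer when it is already null-terminated UTF-8,
  // otherwise converts into a new allocation owned by the returned blob.
  CComPtr<IDxcBlobUtf8> pUtf8;
  IFR(hlsl::DxcGetBlobAsUtf8(pBlob, /*pMalloc*/ nullptr, &pUtf8));

  // Wrap the blob in a read-only IStream; the stream excludes the trailing
  // null character for UTF-8 blobs (see ReadOnlyBlobStream::Init above).
  CComPtr<IStream> pStream;
  IFR(hlsl::CreateReadOnlyBlobStream(pUtf8, &pStream));

  char buffer[64];
  ULONG read = 0;
  // Read() returns S_FALSE on a short read (see the implementation above).
  pStream->Read(buffer, sizeof(buffer), &read);
  return S_OK;
}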
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/LTO/LTOCodeGenerator.cpp
//===-LTOCodeGenerator.cpp - LLVM Link Time Optimizer ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Link Time Optimization library. This library is // intended to be used by linker to optimize code at link time. // //===----------------------------------------------------------------------===// #include "llvm/LTO/LTOCodeGenerator.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/CodeGen/RuntimeLibcalls.h" #include "llvm/Config/config.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/IR/Verifier.h" #include "llvm/InitializePasses.h" #include "llvm/LTO/LTOModule.h" #include "llvm/Linker/Linker.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/SubtargetFeature.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Host.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Signals.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/ToolOutputFile.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/PassManagerBuilder.h" #include "llvm/Transforms/ObjCARC.h" #include <system_error> using namespace llvm; const char* LTOCodeGenerator::getVersionString() { #ifdef LLVM_VERSION_INFO return PACKAGE_NAME " version " PACKAGE_VERSION ", " LLVM_VERSION_INFO; #else return PACKAGE_NAME " version " PACKAGE_VERSION; #endif } LTOCodeGenerator::LTOCodeGenerator() : Context(getGlobalContext()), IRLinker(new Module("ld-temp.o", Context)) { initializeLTOPasses(); } LTOCodeGenerator::LTOCodeGenerator(std::unique_ptr<LLVMContext> Context) : OwnedContext(std::move(Context)), Context(*OwnedContext), IRLinker(new Module("ld-temp.o", *OwnedContext)) { initializeLTOPasses(); } void LTOCodeGenerator::destroyMergedModule() { if (OwnedModule) { assert(IRLinker.getModule() == &OwnedModule->getModule() && "The linker's module should be the same as the owned module"); delete OwnedModule; OwnedModule = nullptr; } else if (IRLinker.getModule()) IRLinker.deleteModule(); } LTOCodeGenerator::~LTOCodeGenerator() { destroyMergedModule(); delete TargetMach; TargetMach = nullptr; for (std::vector<char *>::iterator I = CodegenOptions.begin(), E = CodegenOptions.end(); I != E; ++I) free(*I); } // Initialize LTO passes. Please keep this funciton in sync with // PassManagerBuilder::populateLTOPassManager(), and make sure all LTO // passes are initialized. 
void LTOCodeGenerator::initializeLTOPasses() { PassRegistry &R = *PassRegistry::getPassRegistry(); initializeInternalizePassPass(R); initializeIPSCCPPass(R); initializeGlobalOptPass(R); initializeConstantMergePass(R); initializeDAHPass(R); initializeInstructionCombiningPassPass(R); initializeSimpleInlinerPass(R); initializePruneEHPass(R); initializeGlobalDCEPass(R); initializeArgPromotionPass(R); initializeJumpThreadingPass(R); initializeSROAPass(R); initializeSROA_DTPass(R); initializeSROA_SSAUpPass(R); initializeFunctionAttrsPass(R); initializeGlobalsModRefPass(R); initializeLICMPass(R); initializeMergedLoadStoreMotionPass(R); initializeGVNPass(R); initializeMemCpyOptPass(R); initializeDCEPass(R); initializeCFGSimplifyPassPass(R); } bool LTOCodeGenerator::addModule(LTOModule *mod) { assert(&mod->getModule().getContext() == &Context && "Expected module in same context"); bool ret = IRLinker.linkInModule(&mod->getModule()); const std::vector<const char*> &undefs = mod->getAsmUndefinedRefs(); for (int i = 0, e = undefs.size(); i != e; ++i) AsmUndefinedRefs[undefs[i]] = 1; return !ret; } void LTOCodeGenerator::setModule(LTOModule *Mod) { assert(&Mod->getModule().getContext() == &Context && "Expected module in same context"); // Delete the old merged module. destroyMergedModule(); AsmUndefinedRefs.clear(); OwnedModule = Mod; IRLinker.setModule(&Mod->getModule()); const std::vector<const char*> &Undefs = Mod->getAsmUndefinedRefs(); for (int I = 0, E = Undefs.size(); I != E; ++I) AsmUndefinedRefs[Undefs[I]] = 1; } void LTOCodeGenerator::setTargetOptions(TargetOptions options) { Options = options; } void LTOCodeGenerator::setDebugInfo(lto_debug_model debug) { switch (debug) { case LTO_DEBUG_MODEL_NONE: EmitDwarfDebugInfo = false; return; case LTO_DEBUG_MODEL_DWARF: EmitDwarfDebugInfo = true; return; } llvm_unreachable("Unknown debug format!"); } void LTOCodeGenerator::setCodePICModel(lto_codegen_model model) { switch (model) { case LTO_CODEGEN_PIC_MODEL_STATIC: case LTO_CODEGEN_PIC_MODEL_DYNAMIC: case LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC: case LTO_CODEGEN_PIC_MODEL_DEFAULT: CodeModel = model; return; } llvm_unreachable("Unknown PIC model!"); } bool LTOCodeGenerator::writeMergedModules(const char *path, std::string &errMsg) { if (!determineTarget(errMsg)) return false; // mark which symbols can not be internalized applyScopeRestrictions(); // create output file std::error_code EC; tool_output_file Out(path, EC, sys::fs::F_None); if (EC) { errMsg = "could not open bitcode file for writing: "; errMsg += path; return false; } // write bitcode to it WriteBitcodeToFile(IRLinker.getModule(), Out.os(), ShouldEmbedUselists); Out.os().close(); if (Out.os().has_error()) { errMsg = "could not write bitcode file: "; errMsg += path; Out.os().clear_error(); return false; } Out.keep(); return true; } bool LTOCodeGenerator::compileOptimizedToFile(const char **name, std::string &errMsg) { // make unique temp .o file to put generated object file SmallString<128> Filename; int FD; std::error_code EC = sys::fs::createTemporaryFile("lto-llvm", "o", FD, Filename); if (EC) { errMsg = EC.message(); return false; } // generate object file tool_output_file objFile(Filename.c_str(), FD); bool genResult = compileOptimized(objFile.os(), errMsg); objFile.os().close(); if (objFile.os().has_error()) { objFile.os().clear_error(); sys::fs::remove(Twine(Filename)); return false; } objFile.keep(); if (!genResult) { sys::fs::remove(Twine(Filename)); return false; } NativeObjectPath = Filename.c_str(); *name = 
NativeObjectPath.c_str(); return true; } std::unique_ptr<MemoryBuffer> LTOCodeGenerator::compileOptimized(std::string &errMsg) { const char *name; if (!compileOptimizedToFile(&name, errMsg)) return nullptr; // read .o file into memory buffer ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = MemoryBuffer::getFile(name, -1, false); if (std::error_code EC = BufferOrErr.getError()) { errMsg = EC.message(); sys::fs::remove(NativeObjectPath); return nullptr; } // remove temp files sys::fs::remove(NativeObjectPath); return std::move(*BufferOrErr); } bool LTOCodeGenerator::compile_to_file(const char **name, bool disableInline, bool disableGVNLoadPRE, bool disableVectorization, std::string &errMsg) { if (!optimize(disableInline, disableGVNLoadPRE, disableVectorization, errMsg)) return false; return compileOptimizedToFile(name, errMsg); } std::unique_ptr<MemoryBuffer> LTOCodeGenerator::compile(bool disableInline, bool disableGVNLoadPRE, bool disableVectorization, std::string &errMsg) { if (!optimize(disableInline, disableGVNLoadPRE, disableVectorization, errMsg)) return nullptr; return compileOptimized(errMsg); } bool LTOCodeGenerator::determineTarget(std::string &errMsg) { if (TargetMach) return true; std::string TripleStr = IRLinker.getModule()->getTargetTriple(); if (TripleStr.empty()) TripleStr = sys::getDefaultTargetTriple(); llvm::Triple Triple(TripleStr); // create target machine from info for merged modules const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg); if (!march) return false; // The relocation model is actually a static member of TargetMachine and // needs to be set before the TargetMachine is instantiated. Reloc::Model RelocModel = Reloc::Default; switch (CodeModel) { case LTO_CODEGEN_PIC_MODEL_STATIC: RelocModel = Reloc::Static; break; case LTO_CODEGEN_PIC_MODEL_DYNAMIC: RelocModel = Reloc::PIC_; break; case LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC: RelocModel = Reloc::DynamicNoPIC; break; case LTO_CODEGEN_PIC_MODEL_DEFAULT: // RelocModel is already the default, so leave it that way. break; } // Construct LTOModule, hand over ownership of module and target. Use MAttr as // the default set of features. SubtargetFeatures Features(MAttr); Features.getDefaultSubtargetFeatures(Triple); std::string FeatureStr = Features.getString(); // Set a default CPU for Darwin triples. if (MCpu.empty() && Triple.isOSDarwin()) { if (Triple.getArch() == llvm::Triple::x86_64) MCpu = "core2"; else if (Triple.getArch() == llvm::Triple::x86) MCpu = "yonah"; else if (Triple.getArch() == llvm::Triple::aarch64) MCpu = "cyclone"; } CodeGenOpt::Level CGOptLevel; switch (OptLevel) { case 0: CGOptLevel = CodeGenOpt::None; break; case 1: CGOptLevel = CodeGenOpt::Less; break; case 2: CGOptLevel = CodeGenOpt::Default; break; case 3: CGOptLevel = CodeGenOpt::Aggressive; break; } TargetMach = march->createTargetMachine(TripleStr, MCpu, FeatureStr, Options, RelocModel, CodeModel::Default, CGOptLevel); return true; } void LTOCodeGenerator:: applyRestriction(GlobalValue &GV, ArrayRef<StringRef> Libcalls, std::vector<const char*> &MustPreserveList, SmallPtrSetImpl<GlobalValue*> &AsmUsed, Mangler &Mangler) { // There are no restrictions to apply to declarations. if (GV.isDeclaration()) return; // There is nothing more restrictive than private linkage. 
if (GV.hasPrivateLinkage()) return; SmallString<64> Buffer; TargetMach->getNameWithPrefix(Buffer, &GV, Mangler); if (MustPreserveSymbols.count(Buffer)) MustPreserveList.push_back(GV.getName().data()); if (AsmUndefinedRefs.count(Buffer)) AsmUsed.insert(&GV); // Conservatively append user-supplied runtime library functions to // llvm.compiler.used. These could be internalized and deleted by // optimizations like -globalopt, causing problems when later optimizations // add new library calls (e.g., llvm.memset => memset and printf => puts). // Leave it to the linker to remove any dead code (e.g. with -dead_strip). if (isa<Function>(GV) && std::binary_search(Libcalls.begin(), Libcalls.end(), GV.getName())) AsmUsed.insert(&GV); } static void findUsedValues(GlobalVariable *LLVMUsed, SmallPtrSetImpl<GlobalValue*> &UsedValues) { if (!LLVMUsed) return; ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) if (GlobalValue *GV = dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts())) UsedValues.insert(GV); } // Collect names of runtime library functions. User-defined functions with the // same names are added to llvm.compiler.used to prevent them from being // deleted by optimizations. static void accumulateAndSortLibcalls(std::vector<StringRef> &Libcalls, const TargetLibraryInfo& TLI, const Module &Mod, const TargetMachine &TM) { // TargetLibraryInfo has info on C runtime library calls on the current // target. for (unsigned I = 0, E = static_cast<unsigned>(LibFunc::NumLibFuncs); I != E; ++I) { LibFunc::Func F = static_cast<LibFunc::Func>(I); if (TLI.has(F)) Libcalls.push_back(TLI.getName(F)); } SmallPtrSet<const TargetLowering *, 1> TLSet; for (const Function &F : Mod) { const TargetLowering *Lowering = TM.getSubtargetImpl(F)->getTargetLowering(); if (Lowering && TLSet.insert(Lowering).second) // TargetLowering has info on library calls that CodeGen expects to be // available, both from the C runtime and compiler-rt. for (unsigned I = 0, E = static_cast<unsigned>(RTLIB::UNKNOWN_LIBCALL); I != E; ++I) if (const char *Name = Lowering->getLibcallName(static_cast<RTLIB::Libcall>(I))) Libcalls.push_back(Name); } array_pod_sort(Libcalls.begin(), Libcalls.end()); Libcalls.erase(std::unique(Libcalls.begin(), Libcalls.end()), Libcalls.end()); } void LTOCodeGenerator::applyScopeRestrictions() { if (ScopeRestrictionsDone || !ShouldInternalize) return; Module *mergedModule = IRLinker.getModule(); // Start off with a verification pass. 
legacy::PassManager passes; passes.add(createVerifierPass()); // mark which symbols can not be internalized Mangler Mangler; std::vector<const char*> MustPreserveList; SmallPtrSet<GlobalValue*, 8> AsmUsed; std::vector<StringRef> Libcalls; TargetLibraryInfoImpl TLII(Triple(TargetMach->getTargetTriple())); TargetLibraryInfo TLI(TLII); accumulateAndSortLibcalls(Libcalls, TLI, *mergedModule, *TargetMach); for (Module::iterator f = mergedModule->begin(), e = mergedModule->end(); f != e; ++f) applyRestriction(*f, Libcalls, MustPreserveList, AsmUsed, Mangler); for (Module::global_iterator v = mergedModule->global_begin(), e = mergedModule->global_end(); v != e; ++v) applyRestriction(*v, Libcalls, MustPreserveList, AsmUsed, Mangler); for (Module::alias_iterator a = mergedModule->alias_begin(), e = mergedModule->alias_end(); a != e; ++a) applyRestriction(*a, Libcalls, MustPreserveList, AsmUsed, Mangler); GlobalVariable *LLVMCompilerUsed = mergedModule->getGlobalVariable("llvm.compiler.used"); findUsedValues(LLVMCompilerUsed, AsmUsed); if (LLVMCompilerUsed) LLVMCompilerUsed->eraseFromParent(); if (!AsmUsed.empty()) { llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(Context); std::vector<Constant*> asmUsed2; for (auto *GV : AsmUsed) { Constant *c = ConstantExpr::getBitCast(GV, i8PTy); asmUsed2.push_back(c); } llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, asmUsed2.size()); LLVMCompilerUsed = new llvm::GlobalVariable(*mergedModule, ATy, false, llvm::GlobalValue::AppendingLinkage, llvm::ConstantArray::get(ATy, asmUsed2), "llvm.compiler.used"); LLVMCompilerUsed->setSection("llvm.metadata"); } passes.add(createInternalizePass(MustPreserveList)); // apply scope restrictions passes.run(*mergedModule); ScopeRestrictionsDone = true; } /// Optimize merged modules using various IPO passes bool LTOCodeGenerator::optimize(bool DisableInline, bool DisableGVNLoadPRE, bool DisableVectorization, std::string &errMsg) { if (!this->determineTarget(errMsg)) return false; Module *mergedModule = IRLinker.getModule(); // Mark which symbols can not be internalized this->applyScopeRestrictions(); // Instantiate the pass manager to organize the passes. legacy::PassManager passes; // Add an appropriate DataLayout instance for this module... mergedModule->setDataLayout(*TargetMach->getDataLayout()); passes.add( createTargetTransformInfoWrapperPass(TargetMach->getTargetIRAnalysis())); Triple TargetTriple(TargetMach->getTargetTriple()); PassManagerBuilder PMB; PMB.DisableGVNLoadPRE = DisableGVNLoadPRE; PMB.LoopVectorize = !DisableVectorization; PMB.SLPVectorize = !DisableVectorization; if (!DisableInline) PMB.Inliner = createFunctionInliningPass(); PMB.LibraryInfo = new TargetLibraryInfoImpl(TargetTriple); PMB.OptLevel = OptLevel; PMB.VerifyInput = true; PMB.VerifyOutput = true; PMB.populateLTOPassManager(passes); // Run our queue of passes all at once now, efficiently. passes.run(*mergedModule); return true; } bool LTOCodeGenerator::compileOptimized(raw_pwrite_stream &out, std::string &errMsg) { if (!this->determineTarget(errMsg)) return false; Module *mergedModule = IRLinker.getModule(); legacy::PassManager codeGenPasses; // If the bitcode files contain ARC code and were compiled with optimization, // the ObjCARCContractPass must be run, so do it unconditionally here. 
codeGenPasses.add(createObjCARCContractPass()); if (TargetMach->addPassesToEmitFile(codeGenPasses, out, TargetMachine::CGFT_ObjectFile)) { errMsg = "target file type not supported"; return false; } // Run the code generator, and write assembly file codeGenPasses.run(*mergedModule); return true; } /// setCodeGenDebugOptions - Set codegen debugging options to aid in debugging /// LTO problems. void LTOCodeGenerator::setCodeGenDebugOptions(const char *options) { for (std::pair<StringRef, StringRef> o = getToken(options); !o.first.empty(); o = getToken(o.second)) { // ParseCommandLineOptions() expects argv[0] to be program name. Lazily add // that. if (CodegenOptions.empty()) CodegenOptions.push_back(strdup("libLLVMLTO")); CodegenOptions.push_back(strdup(o.first.str().c_str())); } } void LTOCodeGenerator::parseCodeGenDebugOptions() { // if options were requested, set them if (!CodegenOptions.empty()) cl::ParseCommandLineOptions(CodegenOptions.size(), const_cast<char **>(&CodegenOptions[0])); } void LTOCodeGenerator::DiagnosticHandler(const DiagnosticInfo &DI, void *Context) { ((LTOCodeGenerator *)Context)->DiagnosticHandler2(DI); } void LTOCodeGenerator::DiagnosticHandler2(const DiagnosticInfo &DI) { // Map the LLVM internal diagnostic severity to the LTO diagnostic severity. lto_codegen_diagnostic_severity_t Severity; switch (DI.getSeverity()) { case DS_Error: Severity = LTO_DS_ERROR; break; case DS_Warning: Severity = LTO_DS_WARNING; break; case DS_Remark: Severity = LTO_DS_REMARK; break; case DS_Note: Severity = LTO_DS_NOTE; break; } // Create the string that will be reported to the external diagnostic handler. std::string MsgStorage; raw_string_ostream Stream(MsgStorage); DiagnosticPrinterRawOStream DP(Stream); DI.print(DP); Stream.flush(); // If this method has been called it means someone has set up an external // diagnostic handler. Assert on that. assert(DiagHandler && "Invalid diagnostic handler"); (*DiagHandler)(Severity, MsgStorage.c_str(), DiagContext); } void LTOCodeGenerator::setDiagnosticHandler(lto_diagnostic_handler_t DiagHandler, void *Ctxt) { this->DiagHandler = DiagHandler; this->DiagContext = Ctxt; if (!DiagHandler) return Context.setDiagnosticHandler(nullptr, nullptr); // Register the LTOCodeGenerator stub in the LLVMContext to forward the // diagnostic to the external DiagHandler. Context.setDiagnosticHandler(LTOCodeGenerator::DiagnosticHandler, this, /* RespectFilters */ true); }
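A minimal driver sketch for LTOCodeGenerator (illustrative, not part of the repository file): it assumes the llvm/LTO headers are on the include path, that the native target, MC layer, and asm printer have been registered by the caller, and it elides lifetime management of the LTOModule. The input path and PIC model are placeholders.

#include "llvm/LTO/LTOCodeGenerator.h"
#include "llvm/LTO/LTOModule.h"
#include "llvm/Target/TargetOptions.h"
#include <string>

// Link one bitcode file into the code generator and emit a native object file.
static bool EmitNativeObject(const char **ObjPath, std::string &Err) {
  llvm::LTOCodeGenerator CG;

  // createFromFile parses the bitcode and builds a target machine for it.
  llvm::LTOModule *Mod =
      llvm::LTOModule::createFromFile("input.o", llvm::TargetOptions(), Err);
  if (!Mod || !CG.addModule(Mod))
    return false;

  CG.setCodePICModel(LTO_CODEGEN_PIC_MODEL_DYNAMIC);

  // Runs the IPO pipeline (optimize) and then codegen; on success *ObjPath
  // points at the temporary object file produced by compileOptimizedToFile.
  return CG.compile_to_file(ObjPath, /*disableInline=*/false,
                            /*disableGVNLoadPRE=*/false,
                            /*disableVectorization=*/false, Err);
}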
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/LTO/CMakeLists.txt
add_llvm_library(LLVMLTO
  LTOModule.cpp
  LTOCodeGenerator.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/LTO
  )

add_dependencies(LLVMLTO intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/LTO/LLVMBuild.txt
;===- ./lib/LTO/LLVMBuild.txt ----------------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = LTO
parent = Libraries
required_libraries = Analysis BitReader BitWriter Core IPA IPO InstCombine Linker Scalar Support Target ; CodeGen MC ObjCARC Object - HLSL Change
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/LTO/LTOModule.cpp
//===-- LTOModule.cpp - LLVM Link Time Optimizer --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Link Time Optimization library. This library is // intended to be used by linker to optimize code at link time. // //===----------------------------------------------------------------------===// #include "llvm/LTO/LTOModule.h" #include "llvm/ADT/Triple.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/CodeGen/Analysis.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCSection.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/MC/SubtargetFeature.h" #include "llvm/Object/IRObjectFile.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Host.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/Utils/GlobalStatus.h" #include <system_error> using namespace llvm; using namespace llvm::object; LTOModule::LTOModule(std::unique_ptr<object::IRObjectFile> Obj, llvm::TargetMachine *TM) : IRFile(std::move(Obj)), _target(TM) {} LTOModule::LTOModule(std::unique_ptr<object::IRObjectFile> Obj, llvm::TargetMachine *TM, std::unique_ptr<LLVMContext> Context) : OwnedContext(std::move(Context)), IRFile(std::move(Obj)), _target(TM) {} LTOModule::~LTOModule() {} /// isBitcodeFile - Returns 'true' if the file (or memory contents) is LLVM /// bitcode. 
bool LTOModule::isBitcodeFile(const void *Mem, size_t Length) { ErrorOr<MemoryBufferRef> BCData = IRObjectFile::findBitcodeInMemBuffer( MemoryBufferRef(StringRef((const char *)Mem, Length), "<mem>")); return bool(BCData); } bool LTOModule::isBitcodeFile(const char *Path) { ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = MemoryBuffer::getFile(Path); if (!BufferOrErr) return false; ErrorOr<MemoryBufferRef> BCData = IRObjectFile::findBitcodeInMemBuffer( BufferOrErr.get()->getMemBufferRef()); return bool(BCData); } bool LTOModule::isBitcodeForTarget(MemoryBuffer *Buffer, StringRef TriplePrefix) { ErrorOr<MemoryBufferRef> BCOrErr = IRObjectFile::findBitcodeInMemBuffer(Buffer->getMemBufferRef()); if (!BCOrErr) return false; LLVMContext Context; std::string Triple = getBitcodeTargetTriple(*BCOrErr, Context); return StringRef(Triple).startswith(TriplePrefix); } LTOModule *LTOModule::createFromFile(const char *path, TargetOptions options, std::string &errMsg) { ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = MemoryBuffer::getFile(path); if (std::error_code EC = BufferOrErr.getError()) { errMsg = EC.message(); return nullptr; } std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get()); return makeLTOModule(Buffer->getMemBufferRef(), options, errMsg, &getGlobalContext()); } LTOModule *LTOModule::createFromOpenFile(int fd, const char *path, size_t size, TargetOptions options, std::string &errMsg) { return createFromOpenFileSlice(fd, path, size, 0, options, errMsg); } LTOModule *LTOModule::createFromOpenFileSlice(int fd, const char *path, size_t map_size, off_t offset, TargetOptions options, std::string &errMsg) { ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = MemoryBuffer::getOpenFileSlice(fd, path, map_size, offset); if (std::error_code EC = BufferOrErr.getError()) { errMsg = EC.message(); return nullptr; } std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get()); return makeLTOModule(Buffer->getMemBufferRef(), options, errMsg, &getGlobalContext()); } LTOModule *LTOModule::createFromBuffer(const void *mem, size_t length, TargetOptions options, std::string &errMsg, StringRef path) { return createInContext(mem, length, options, errMsg, path, &getGlobalContext()); } LTOModule *LTOModule::createInLocalContext(const void *mem, size_t length, TargetOptions options, std::string &errMsg, StringRef path) { return createInContext(mem, length, options, errMsg, path, nullptr); } LTOModule *LTOModule::createInContext(const void *mem, size_t length, TargetOptions options, std::string &errMsg, StringRef path, LLVMContext *Context) { StringRef Data((const char *)mem, length); MemoryBufferRef Buffer(Data, path); return makeLTOModule(Buffer, options, errMsg, Context); } static std::unique_ptr<Module> parseBitcodeFileImpl(MemoryBufferRef Buffer, LLVMContext &Context, bool ShouldBeLazy, std::string &ErrMsg) { // Find the buffer. ErrorOr<MemoryBufferRef> MBOrErr = IRObjectFile::findBitcodeInMemBuffer(Buffer); if (std::error_code EC = MBOrErr.getError()) { ErrMsg = EC.message(); return nullptr; } std::function<void(const DiagnosticInfo &)> DiagnosticHandler = [&ErrMsg](const DiagnosticInfo &DI) { raw_string_ostream Stream(ErrMsg); DiagnosticPrinterRawOStream DP(Stream); DI.print(DP); }; if (!ShouldBeLazy) { // Parse the full file. ErrorOr<std::unique_ptr<Module>> M = parseBitcodeFile(*MBOrErr, Context, DiagnosticHandler); if (!M) return nullptr; return std::move(*M); } // Parse lazily. 
std::unique_ptr<MemoryBuffer> LightweightBuf = MemoryBuffer::getMemBuffer(*MBOrErr, false); ErrorOr<std::unique_ptr<Module>> M = getLazyBitcodeModule(std::move(LightweightBuf), Context, DiagnosticHandler, true /*ShouldLazyLoadMetadata*/); if (!M) return nullptr; return std::move(*M); } LTOModule *LTOModule::makeLTOModule(MemoryBufferRef Buffer, TargetOptions options, std::string &errMsg, LLVMContext *Context) { std::unique_ptr<LLVMContext> OwnedContext; if (!Context) { OwnedContext = llvm::make_unique<LLVMContext>(); Context = OwnedContext.get(); } // If we own a context, we know this is being used only for symbol // extraction, not linking. Be lazy in that case. std::unique_ptr<Module> M = parseBitcodeFileImpl( Buffer, *Context, /* ShouldBeLazy */ static_cast<bool>(OwnedContext), errMsg); if (!M) return nullptr; std::string TripleStr = M->getTargetTriple(); if (TripleStr.empty()) TripleStr = sys::getDefaultTargetTriple(); llvm::Triple Triple(TripleStr); // find machine architecture for this module const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg); if (!march) return nullptr; // construct LTOModule, hand over ownership of module and target SubtargetFeatures Features; Features.getDefaultSubtargetFeatures(Triple); std::string FeatureStr = Features.getString(); // Set a default CPU for Darwin triples. std::string CPU; if (Triple.isOSDarwin()) { if (Triple.getArch() == llvm::Triple::x86_64) CPU = "core2"; else if (Triple.getArch() == llvm::Triple::x86) CPU = "yonah"; else if (Triple.getArch() == llvm::Triple::aarch64) CPU = "cyclone"; } TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr, options); M->setDataLayout(*target->getDataLayout()); std::unique_ptr<object::IRObjectFile> IRObj( new object::IRObjectFile(Buffer, std::move(M))); LTOModule *Ret; if (OwnedContext) Ret = new LTOModule(std::move(IRObj), target, std::move(OwnedContext)); else Ret = new LTOModule(std::move(IRObj), target); if (Ret->parseSymbols(errMsg)) { delete Ret; return nullptr; } Ret->parseMetadata(); return Ret; } /// Create a MemoryBuffer from a memory range with an optional name. std::unique_ptr<MemoryBuffer> LTOModule::makeBuffer(const void *mem, size_t length, StringRef name) { const char *startPtr = (const char*)mem; return MemoryBuffer::getMemBuffer(StringRef(startPtr, length), name, false); } /// objcClassNameFromExpression - Get string that the data pointer points to. bool LTOModule::objcClassNameFromExpression(const Constant *c, std::string &name) { if (const ConstantExpr *ce = dyn_cast<ConstantExpr>(c)) { Constant *op = ce->getOperand(0); if (GlobalVariable *gvn = dyn_cast<GlobalVariable>(op)) { Constant *cn = gvn->getInitializer(); if (ConstantDataArray *ca = dyn_cast<ConstantDataArray>(cn)) { if (ca->isCString()) { name = (".objc_class_name_" + ca->getAsCString()).str(); return true; } } } } return false; } /// addObjCClass - Parse i386/ppc ObjC class data structure. 
void LTOModule::addObjCClass(const GlobalVariable *clgv) { const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); if (!c) return; // second slot in __OBJC,__class is pointer to superclass name std::string superclassName; if (objcClassNameFromExpression(c->getOperand(1), superclassName)) { auto IterBool = _undefines.insert(std::make_pair(superclassName, NameAndAttributes())); if (IterBool.second) { NameAndAttributes &info = IterBool.first->second; info.name = IterBool.first->first().data(); info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED; info.isFunction = false; info.symbol = clgv; } } // third slot in __OBJC,__class is pointer to class name std::string className; if (objcClassNameFromExpression(c->getOperand(2), className)) { auto Iter = _defines.insert(className).first; NameAndAttributes info; info.name = Iter->first().data(); info.attributes = LTO_SYMBOL_PERMISSIONS_DATA | LTO_SYMBOL_DEFINITION_REGULAR | LTO_SYMBOL_SCOPE_DEFAULT; info.isFunction = false; info.symbol = clgv; _symbols.push_back(info); } } /// addObjCCategory - Parse i386/ppc ObjC category data structure. void LTOModule::addObjCCategory(const GlobalVariable *clgv) { const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); if (!c) return; // second slot in __OBJC,__category is pointer to target class name std::string targetclassName; if (!objcClassNameFromExpression(c->getOperand(1), targetclassName)) return; auto IterBool = _undefines.insert(std::make_pair(targetclassName, NameAndAttributes())); if (!IterBool.second) return; NameAndAttributes &info = IterBool.first->second; info.name = IterBool.first->first().data(); info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED; info.isFunction = false; info.symbol = clgv; } /// addObjCClassRef - Parse i386/ppc ObjC class list data structure. void LTOModule::addObjCClassRef(const GlobalVariable *clgv) { std::string targetclassName; if (!objcClassNameFromExpression(clgv->getInitializer(), targetclassName)) return; auto IterBool = _undefines.insert(std::make_pair(targetclassName, NameAndAttributes())); if (!IterBool.second) return; NameAndAttributes &info = IterBool.first->second; info.name = IterBool.first->first().data(); info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED; info.isFunction = false; info.symbol = clgv; } void LTOModule::addDefinedDataSymbol(const object::BasicSymbolRef &Sym) { SmallString<64> Buffer; { raw_svector_ostream OS(Buffer); Sym.printName(OS); } const GlobalValue *V = IRFile->getSymbolGV(Sym.getRawDataRefImpl()); addDefinedDataSymbol(Buffer.c_str(), V); } void LTOModule::addDefinedDataSymbol(const char *Name, const GlobalValue *v) { // Add to list of defined symbols. addDefinedSymbol(Name, v, false); if (!v->hasSection() /* || !isTargetDarwin */) return; // Special case i386/ppc ObjC data structures in magic sections: // The issue is that the old ObjC object format did some strange // contortions to avoid real linker symbols. For instance, the // ObjC class data structure is allocated statically in the executable // that defines that class. That data structures contains a pointer to // its superclass. But instead of just initializing that part of the // struct to the address of its superclass, and letting the static and // dynamic linkers do the rest, the runtime works by having that field // instead point to a C-string that is the name of the superclass. // At runtime the objc initialization updates that pointer and sets // it to point to the actual super class. 
As far as the linker // knows it is just a pointer to a string. But then someone wanted the // linker to issue errors at build time if the superclass was not found. // So they figured out a way in mach-o object format to use an absolute // symbols (.objc_class_name_Foo = 0) and a floating reference // (.reference .objc_class_name_Bar) to cause the linker into erroring when // a class was missing. // The following synthesizes the implicit .objc_* symbols for the linker // from the ObjC data structures generated by the front end. // special case if this data blob is an ObjC class definition std::string Section = v->getSection(); if (Section.compare(0, 15, "__OBJC,__class,") == 0) { if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { addObjCClass(gv); } } // special case if this data blob is an ObjC category definition else if (Section.compare(0, 18, "__OBJC,__category,") == 0) { if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { addObjCCategory(gv); } } // special case if this data blob is the list of referenced classes else if (Section.compare(0, 18, "__OBJC,__cls_refs,") == 0) { if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { addObjCClassRef(gv); } } } void LTOModule::addDefinedFunctionSymbol(const object::BasicSymbolRef &Sym) { SmallString<64> Buffer; { raw_svector_ostream OS(Buffer); Sym.printName(OS); } const Function *F = cast<Function>(IRFile->getSymbolGV(Sym.getRawDataRefImpl())); addDefinedFunctionSymbol(Buffer.c_str(), F); } void LTOModule::addDefinedFunctionSymbol(const char *Name, const Function *F) { // add to list of defined symbols addDefinedSymbol(Name, F, true); } void LTOModule::addDefinedSymbol(const char *Name, const GlobalValue *def, bool isFunction) { // set alignment part log2() can have rounding errors uint32_t align = def->getAlignment(); uint32_t attr = align ? countTrailingZeros(align) : 0; // set permissions part if (isFunction) { attr |= LTO_SYMBOL_PERMISSIONS_CODE; } else { const GlobalVariable *gv = dyn_cast<GlobalVariable>(def); if (gv && gv->isConstant()) attr |= LTO_SYMBOL_PERMISSIONS_RODATA; else attr |= LTO_SYMBOL_PERMISSIONS_DATA; } // set definition part if (def->hasWeakLinkage() || def->hasLinkOnceLinkage()) attr |= LTO_SYMBOL_DEFINITION_WEAK; else if (def->hasCommonLinkage()) attr |= LTO_SYMBOL_DEFINITION_TENTATIVE; else attr |= LTO_SYMBOL_DEFINITION_REGULAR; // set scope part if (def->hasLocalLinkage()) // Ignore visibility if linkage is local. attr |= LTO_SYMBOL_SCOPE_INTERNAL; else if (def->hasHiddenVisibility()) attr |= LTO_SYMBOL_SCOPE_HIDDEN; else if (def->hasProtectedVisibility()) attr |= LTO_SYMBOL_SCOPE_PROTECTED; else if (canBeOmittedFromSymbolTable(def)) attr |= LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN; else attr |= LTO_SYMBOL_SCOPE_DEFAULT; if (def->hasComdat()) attr |= LTO_SYMBOL_COMDAT; if (isa<GlobalAlias>(def)) attr |= LTO_SYMBOL_ALIAS; auto Iter = _defines.insert(Name).first; // fill information structure NameAndAttributes info; StringRef NameRef = Iter->first(); info.name = NameRef.data(); assert(info.name[NameRef.size()] == '\0'); info.attributes = attr; info.isFunction = isFunction; info.symbol = def; // add to table of symbols _symbols.push_back(info); } /// addAsmGlobalSymbol - Add a global symbol from module-level ASM to the /// defined list. 
void LTOModule::addAsmGlobalSymbol(const char *name, lto_symbol_attributes scope) { auto IterBool = _defines.insert(name); // only add new define if not already defined if (!IterBool.second) return; NameAndAttributes &info = _undefines[IterBool.first->first().data()]; if (info.symbol == nullptr) { // FIXME: This is trying to take care of module ASM like this: // // module asm ".zerofill __FOO, __foo, _bar_baz_qux, 0" // // but is gross and its mother dresses it funny. Have the ASM parser give us // more details for this type of situation so that we're not guessing so // much. // fill information structure info.name = IterBool.first->first().data(); info.attributes = LTO_SYMBOL_PERMISSIONS_DATA | LTO_SYMBOL_DEFINITION_REGULAR | scope; info.isFunction = false; info.symbol = nullptr; // add to table of symbols _symbols.push_back(info); return; } if (info.isFunction) addDefinedFunctionSymbol(info.name, cast<Function>(info.symbol)); else addDefinedDataSymbol(info.name, info.symbol); _symbols.back().attributes &= ~LTO_SYMBOL_SCOPE_MASK; _symbols.back().attributes |= scope; } /// addAsmGlobalSymbolUndef - Add a global symbol from module-level ASM to the /// undefined list. void LTOModule::addAsmGlobalSymbolUndef(const char *name) { auto IterBool = _undefines.insert(std::make_pair(name, NameAndAttributes())); _asm_undefines.push_back(IterBool.first->first().data()); // we already have the symbol if (!IterBool.second) return; uint32_t attr = LTO_SYMBOL_DEFINITION_UNDEFINED; attr |= LTO_SYMBOL_SCOPE_DEFAULT; NameAndAttributes &info = IterBool.first->second; info.name = IterBool.first->first().data(); info.attributes = attr; info.isFunction = false; info.symbol = nullptr; } /// Add a symbol which isn't defined just yet to a list to be resolved later. void LTOModule::addPotentialUndefinedSymbol(const object::BasicSymbolRef &Sym, bool isFunc) { SmallString<64> name; { raw_svector_ostream OS(name); Sym.printName(OS); } auto IterBool = _undefines.insert(std::make_pair(name, NameAndAttributes())); // we already have the symbol if (!IterBool.second) return; NameAndAttributes &info = IterBool.first->second; info.name = IterBool.first->first().data(); const GlobalValue *decl = IRFile->getSymbolGV(Sym.getRawDataRefImpl()); if (decl->hasExternalWeakLinkage()) info.attributes = LTO_SYMBOL_DEFINITION_WEAKUNDEF; else info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED; info.isFunction = isFunc; info.symbol = decl; } /// parseSymbols - Parse the symbols from the module and model-level ASM and add /// them to either the defined or undefined lists. 
bool LTOModule::parseSymbols(std::string &errMsg) { for (auto &Sym : IRFile->symbols()) { const GlobalValue *GV = IRFile->getSymbolGV(Sym.getRawDataRefImpl()); uint32_t Flags = Sym.getFlags(); if (Flags & object::BasicSymbolRef::SF_FormatSpecific) continue; bool IsUndefined = Flags & object::BasicSymbolRef::SF_Undefined; if (!GV) { SmallString<64> Buffer; { raw_svector_ostream OS(Buffer); Sym.printName(OS); } const char *Name = Buffer.c_str(); if (IsUndefined) addAsmGlobalSymbolUndef(Name); else if (Flags & object::BasicSymbolRef::SF_Global) addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_DEFAULT); else addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_INTERNAL); continue; } auto *F = dyn_cast<Function>(GV); if (IsUndefined) { addPotentialUndefinedSymbol(Sym, F != nullptr); continue; } if (F) { addDefinedFunctionSymbol(Sym); continue; } if (isa<GlobalVariable>(GV)) { addDefinedDataSymbol(Sym); continue; } assert(isa<GlobalAlias>(GV)); addDefinedDataSymbol(Sym); } // make symbols for all undefines for (StringMap<NameAndAttributes>::iterator u =_undefines.begin(), e = _undefines.end(); u != e; ++u) { // If this symbol also has a definition, then don't make an undefine because // it is a tentative definition. if (_defines.count(u->getKey())) continue; NameAndAttributes info = u->getValue(); _symbols.push_back(info); } return false; } /// parseMetadata - Parse metadata from the module void LTOModule::parseMetadata() { raw_string_ostream OS(LinkerOpts); // Linker Options if (Metadata *Val = getModule().getModuleFlag("Linker Options")) { MDNode *LinkerOptions = cast<MDNode>(Val); for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) { MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i)); for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) { MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii)); OS << " " << MDOption->getString(); } } } // Globals Mangler Mang; for (const NameAndAttributes &Sym : _symbols) { if (!Sym.symbol) continue; _target->getObjFileLowering()->emitLinkerFlagsForGlobal(OS, Sym.symbol, Mang); } // Add other interesting metadata here. }
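//
// The attribute word assembled in addDefinedSymbol above packs several
// orthogonal facts into one uint32_t: the low bits carry log2 of the symbol
// alignment, and the permission, definition, and scope bits are OR'ed in
// above them. A minimal sketch of pulling such a word apart again with the
// lto_symbol_attributes masks from llvm-c/lto.h follows; the struct and the
// helper name are illustrative only, not part of this file.
#include "llvm-c/lto.h"
#include <cstdint>

struct DecodedSymbolAttrs {
  unsigned AlignLog2;   // low bits: log2 of the declared alignment
  unsigned Permissions; // LTO_SYMBOL_PERMISSIONS_{CODE,DATA,RODATA}
  unsigned Definition;  // REGULAR / TENTATIVE / WEAK / UNDEFINED / WEAKUNDEF
  unsigned Scope;       // INTERNAL / HIDDEN / PROTECTED / DEFAULT / ...
  bool IsComdat;
  bool IsAlias;
};

static DecodedSymbolAttrs decodeSymbolAttrs(uint32_t Attrs) {
  DecodedSymbolAttrs D;
  D.AlignLog2   = Attrs & LTO_SYMBOL_ALIGNMENT_MASK;
  D.Permissions = Attrs & LTO_SYMBOL_PERMISSIONS_MASK;
  D.Definition  = Attrs & LTO_SYMBOL_DEFINITION_MASK;
  D.Scope       = Attrs & LTO_SYMBOL_SCOPE_MASK;
  D.IsComdat    = (Attrs & LTO_SYMBOL_COMDAT) != 0;
  D.IsAlias     = (Attrs & LTO_SYMBOL_ALIAS) != 0;
  return D;
}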
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Passes/PassRegistry.def
//===- PassRegistry.def - Registry of passes --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is used as the registry of passes that are part of the core LLVM // libraries. This file describes both transformation passes and analyses // Analyses are registered while transformation passes have names registered // that can be used when providing a textual pass pipeline. // //===----------------------------------------------------------------------===// #ifndef MODULE_ANALYSIS #define MODULE_ANALYSIS(NAME, CREATE_PASS) #endif MODULE_ANALYSIS("lcg", LazyCallGraphAnalysis()) MODULE_ANALYSIS("no-op-module", NoOpModuleAnalysis()) MODULE_ANALYSIS("targetlibinfo", TargetLibraryAnalysis()) #undef MODULE_ANALYSIS #ifndef MODULE_PASS #define MODULE_PASS(NAME, CREATE_PASS) #endif MODULE_PASS("invalidate<all>", InvalidateAllAnalysesPass()) MODULE_PASS("no-op-module", NoOpModulePass()) MODULE_PASS("print", PrintModulePass(dbgs())) MODULE_PASS("print-cg", LazyCallGraphPrinterPass(dbgs())) MODULE_PASS("verify", VerifierPass()) #undef MODULE_PASS #ifndef CGSCC_ANALYSIS #define CGSCC_ANALYSIS(NAME, CREATE_PASS) #endif CGSCC_ANALYSIS("no-op-cgscc", NoOpCGSCCAnalysis()) #undef CGSCC_ANALYSIS #ifndef CGSCC_PASS #define CGSCC_PASS(NAME, CREATE_PASS) #endif CGSCC_PASS("invalidate<all>", InvalidateAllAnalysesPass()) CGSCC_PASS("no-op-cgscc", NoOpCGSCCPass()) #undef CGSCC_PASS #ifndef FUNCTION_ANALYSIS #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) #endif FUNCTION_ANALYSIS("assumptions", AssumptionAnalysis()) FUNCTION_ANALYSIS("domtree", DominatorTreeAnalysis()) FUNCTION_ANALYSIS("loops", LoopAnalysis()) FUNCTION_ANALYSIS("no-op-function", NoOpFunctionAnalysis()) FUNCTION_ANALYSIS("targetlibinfo", TargetLibraryAnalysis()) FUNCTION_ANALYSIS("targetir", TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis()) #undef FUNCTION_ANALYSIS #ifndef FUNCTION_PASS #define FUNCTION_PASS(NAME, CREATE_PASS) #endif FUNCTION_PASS("early-cse", EarlyCSEPass()) FUNCTION_PASS("instcombine", InstCombinePass()) FUNCTION_PASS("invalidate<all>", InvalidateAllAnalysesPass()) FUNCTION_PASS("no-op-function", NoOpFunctionPass()) FUNCTION_PASS("lower-expect", LowerExpectIntrinsicPass()) FUNCTION_PASS("print", PrintFunctionPass(dbgs())) FUNCTION_PASS("print<assumptions>", AssumptionPrinterPass(dbgs())) FUNCTION_PASS("print<domtree>", DominatorTreePrinterPass(dbgs())) FUNCTION_PASS("print<loops>", LoopPrinterPass(dbgs())) FUNCTION_PASS("simplify-cfg", SimplifyCFGPass()) FUNCTION_PASS("verify", VerifierPass()) FUNCTION_PASS("verify<domtree>", DominatorTreeVerifierPass()) #undef FUNCTION_PASS
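//
// The registry above is an X-macro style .def file: a consumer defines one or
// more of the MODULE_* / CGSCC_* / FUNCTION_* hooks before including it, and
// every hook it leaves undefined collapses to a no-op (that is what the
// #ifndef / #undef pairs above arrange). A minimal illustrative consumer that
// only collects the textual function-pass names might look like the sketch
// below; it would live in a separate .cpp, not in this .def file.
#include <string>
#include <vector>

static std::vector<std::string> getRegisteredFunctionPassNames() {
  std::vector<std::string> Names;
#define FUNCTION_PASS(NAME, CREATE_PASS) Names.push_back(NAME);
#include "PassRegistry.def"
  return Names;
}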
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Passes/CMakeLists.txt
add_llvm_library(LLVMPasses
  PassBuilder.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Passes
  )

add_dependencies(LLVMPasses intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Passes/LLVMBuild.txt
;===- ./lib/Passes/LLVMBuild.txt -------------------------------*- Conf -*--===;
;
;                     The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = Passes
parent = Libraries
required_libraries = Analysis Core IPA IPO InstCombine Scalar Support TransformUtils Vectorize
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Passes/PassBuilder.cpp
//===- Parsing, selection, and construction of pass pipelines -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file provides the implementation of the PassBuilder based on our /// static pass registry as well as related functionality. It also provides /// helpers to aid in analyzing, debugging, and testing passes and pass /// pipelines. /// //===----------------------------------------------------------------------===// #include "llvm/Passes/PassBuilder.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Analysis/LazyCallGraph.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Debug.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Transforms/InstCombine/InstCombine.h" #include "llvm/Transforms/Scalar/EarlyCSE.h" #include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h" #include "llvm/Transforms/Scalar/SimplifyCFG.h" using namespace llvm; namespace { /// \brief No-op module pass which does nothing. struct NoOpModulePass { PreservedAnalyses run(Module &M) { return PreservedAnalyses::all(); } static StringRef name() { return "NoOpModulePass"; } }; /// \brief No-op module analysis. struct NoOpModuleAnalysis { struct Result {}; Result run(Module &) { return Result(); } static StringRef name() { return "NoOpModuleAnalysis"; } static void *ID() { return (void *)&PassID; } private: static char PassID; }; char NoOpModuleAnalysis::PassID; /// \brief No-op CGSCC pass which does nothing. struct NoOpCGSCCPass { PreservedAnalyses run(LazyCallGraph::SCC &C) { return PreservedAnalyses::all(); } static StringRef name() { return "NoOpCGSCCPass"; } }; /// \brief No-op CGSCC analysis. struct NoOpCGSCCAnalysis { struct Result {}; Result run(LazyCallGraph::SCC &) { return Result(); } static StringRef name() { return "NoOpCGSCCAnalysis"; } static void *ID() { return (void *)&PassID; } private: static char PassID; }; char NoOpCGSCCAnalysis::PassID; /// \brief No-op function pass which does nothing. struct NoOpFunctionPass { PreservedAnalyses run(Function &F) { return PreservedAnalyses::all(); } static StringRef name() { return "NoOpFunctionPass"; } }; /// \brief No-op function analysis. struct NoOpFunctionAnalysis { struct Result {}; Result run(Function &) { return Result(); } static StringRef name() { return "NoOpFunctionAnalysis"; } static void *ID() { return (void *)&PassID; } private: static char PassID; }; char NoOpFunctionAnalysis::PassID; } // End anonymous namespace. 
void PassBuilder::registerModuleAnalyses(ModuleAnalysisManager &MAM) { #define MODULE_ANALYSIS(NAME, CREATE_PASS) \ MAM.registerPass(CREATE_PASS); #include "PassRegistry.def" } void PassBuilder::registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM) { #define CGSCC_ANALYSIS(NAME, CREATE_PASS) \ CGAM.registerPass(CREATE_PASS); #include "PassRegistry.def" } void PassBuilder::registerFunctionAnalyses(FunctionAnalysisManager &FAM) { #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ FAM.registerPass(CREATE_PASS); #include "PassRegistry.def" } #ifndef NDEBUG static bool isModulePassName(StringRef Name) { #define MODULE_PASS(NAME, CREATE_PASS) if (Name == NAME) return true; #define MODULE_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \ return true; #include "PassRegistry.def" return false; } #endif static bool isCGSCCPassName(StringRef Name) { #define CGSCC_PASS(NAME, CREATE_PASS) if (Name == NAME) return true; #define CGSCC_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \ return true; #include "PassRegistry.def" return false; } static bool isFunctionPassName(StringRef Name) { #define FUNCTION_PASS(NAME, CREATE_PASS) if (Name == NAME) return true; #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \ return true; #include "PassRegistry.def" return false; } bool PassBuilder::parseModulePassName(ModulePassManager &MPM, StringRef Name) { #define MODULE_PASS(NAME, CREATE_PASS) \ if (Name == NAME) { \ MPM.addPass(CREATE_PASS); \ return true; \ } #define MODULE_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">") { \ MPM.addPass(RequireAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } \ if (Name == "invalidate<" NAME ">") { \ MPM.addPass(InvalidateAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } #include "PassRegistry.def" return false; } bool PassBuilder::parseCGSCCPassName(CGSCCPassManager &CGPM, StringRef Name) { #define CGSCC_PASS(NAME, CREATE_PASS) \ if (Name == NAME) { \ CGPM.addPass(CREATE_PASS); \ return true; \ } #define CGSCC_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">") { \ CGPM.addPass(RequireAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } \ if (Name == "invalidate<" NAME ">") { \ CGPM.addPass(InvalidateAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } #include "PassRegistry.def" return false; } bool PassBuilder::parseFunctionPassName(FunctionPassManager &FPM, StringRef Name) { #define FUNCTION_PASS(NAME, CREATE_PASS) \ if (Name == NAME) { \ FPM.addPass(CREATE_PASS); \ return true; \ } #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">") { \ FPM.addPass(RequireAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } \ if (Name == "invalidate<" NAME ">") { \ FPM.addPass(InvalidateAnalysisPass<decltype(CREATE_PASS)>()); \ return true; \ } #include "PassRegistry.def" return false; } bool PassBuilder::parseFunctionPassPipeline(FunctionPassManager &FPM, StringRef &PipelineText, bool VerifyEachPass, bool DebugLogging) { for (;;) { // Parse nested pass managers by recursing. if (PipelineText.startswith("function(")) { FunctionPassManager NestedFPM(DebugLogging); // Parse the inner pipeline inte the nested manager. 
PipelineText = PipelineText.substr(strlen("function(")); if (!parseFunctionPassPipeline(NestedFPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Add the nested pass manager with the appropriate adaptor. FPM.addPass(std::move(NestedFPM)); } else { // Otherwise try to parse a pass name. size_t End = PipelineText.find_first_of(",)"); if (!parseFunctionPassName(FPM, PipelineText.substr(0, End))) return false; if (VerifyEachPass) FPM.addPass(VerifierPass()); PipelineText = PipelineText.substr(End); } if (PipelineText.empty() || PipelineText[0] == ')') return true; assert(PipelineText[0] == ','); PipelineText = PipelineText.substr(1); } } bool PassBuilder::parseCGSCCPassPipeline(CGSCCPassManager &CGPM, StringRef &PipelineText, bool VerifyEachPass, bool DebugLogging) { for (;;) { // Parse nested pass managers by recursing. if (PipelineText.startswith("cgscc(")) { CGSCCPassManager NestedCGPM(DebugLogging); // Parse the inner pipeline into the nested manager. PipelineText = PipelineText.substr(strlen("cgscc(")); if (!parseCGSCCPassPipeline(NestedCGPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Add the nested pass manager with the appropriate adaptor. CGPM.addPass(std::move(NestedCGPM)); } else if (PipelineText.startswith("function(")) { FunctionPassManager NestedFPM(DebugLogging); // Parse the inner pipeline inte the nested manager. PipelineText = PipelineText.substr(strlen("function(")); if (!parseFunctionPassPipeline(NestedFPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Add the nested pass manager with the appropriate adaptor. CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(NestedFPM))); } else { // Otherwise try to parse a pass name. size_t End = PipelineText.find_first_of(",)"); if (!parseCGSCCPassName(CGPM, PipelineText.substr(0, End))) return false; // FIXME: No verifier support for CGSCC passes! PipelineText = PipelineText.substr(End); } if (PipelineText.empty() || PipelineText[0] == ')') return true; assert(PipelineText[0] == ','); PipelineText = PipelineText.substr(1); } } bool PassBuilder::parseModulePassPipeline(ModulePassManager &MPM, StringRef &PipelineText, bool VerifyEachPass, bool DebugLogging) { for (;;) { // Parse nested pass managers by recursing. if (PipelineText.startswith("module(")) { ModulePassManager NestedMPM(DebugLogging); // Parse the inner pipeline into the nested manager. PipelineText = PipelineText.substr(strlen("module(")); if (!parseModulePassPipeline(NestedMPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Now add the nested manager as a module pass. MPM.addPass(std::move(NestedMPM)); } else if (PipelineText.startswith("cgscc(")) { CGSCCPassManager NestedCGPM(DebugLogging); // Parse the inner pipeline inte the nested manager. PipelineText = PipelineText.substr(strlen("cgscc(")); if (!parseCGSCCPassPipeline(NestedCGPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Add the nested pass manager with the appropriate adaptor. 
MPM.addPass( createModuleToPostOrderCGSCCPassAdaptor(std::move(NestedCGPM))); } else if (PipelineText.startswith("function(")) { FunctionPassManager NestedFPM(DebugLogging); // Parse the inner pipeline inte the nested manager. PipelineText = PipelineText.substr(strlen("function(")); if (!parseFunctionPassPipeline(NestedFPM, PipelineText, VerifyEachPass, DebugLogging) || PipelineText.empty()) return false; assert(PipelineText[0] == ')'); PipelineText = PipelineText.substr(1); // Add the nested pass manager with the appropriate adaptor. MPM.addPass(createModuleToFunctionPassAdaptor(std::move(NestedFPM))); } else { // Otherwise try to parse a pass name. size_t End = PipelineText.find_first_of(",)"); if (!parseModulePassName(MPM, PipelineText.substr(0, End))) return false; if (VerifyEachPass) MPM.addPass(VerifierPass()); PipelineText = PipelineText.substr(End); } if (PipelineText.empty() || PipelineText[0] == ')') return true; assert(PipelineText[0] == ','); PipelineText = PipelineText.substr(1); } } // Primary pass pipeline description parsing routine. // FIXME: Should this routine accept a TargetMachine or require the caller to // pre-populate the analysis managers with target-specific stuff? bool PassBuilder::parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText, bool VerifyEachPass, bool DebugLogging) { // By default, try to parse the pipeline as-if it were within an implicit // 'module(...)' pass pipeline. If this will parse at all, it needs to // consume the entire string. if (parseModulePassPipeline(MPM, PipelineText, VerifyEachPass, DebugLogging)) return PipelineText.empty(); // This isn't parsable as a module pipeline, look for the end of a pass name // and directly drop down to that layer. StringRef FirstName = PipelineText.substr(0, PipelineText.find_first_of(",)")); assert(!isModulePassName(FirstName) && "Already handled all module pipeline options."); // If this looks like a CGSCC pass, parse the whole thing as a CGSCC // pipeline. if (isCGSCCPassName(FirstName)) { CGSCCPassManager CGPM(DebugLogging); if (!parseCGSCCPassPipeline(CGPM, PipelineText, VerifyEachPass, DebugLogging) || !PipelineText.empty()) return false; MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM))); return true; } // Similarly, if this looks like a Function pass, parse the whole thing as // a Function pipelien. if (isFunctionPassName(FirstName)) { FunctionPassManager FPM(DebugLogging); if (!parseFunctionPassPipeline(FPM, PipelineText, VerifyEachPass, DebugLogging) || !PipelineText.empty()) return false; MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); return true; } return false; }
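//
// A sketch of how a driver might feed a textual pipeline string through
// parsePassPipeline. The analysis-manager constructors, the proxy
// registrations, and the run() call below are modeled on the way opt's
// new-PM driver wires things up in this LLVM generation; they are assumptions
// about the surrounding setup rather than something this file requires. The
// headers this file already includes provide the types used here.
static bool runTextualPipeline(Module &M, StringRef Pipeline) {
  PassBuilder PB;
  FunctionAnalysisManager FAM(false);
  CGSCCAnalysisManager CGAM(false);
  ModuleAnalysisManager MAM(false);
  PB.registerFunctionAnalyses(FAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerModuleAnalyses(MAM);
  // Cross-register the proxies so nested pass managers can reach the
  // analyses that live at the enclosing level.
  MAM.registerPass(FunctionAnalysisManagerModuleProxy(FAM));
  MAM.registerPass(CGSCCAnalysisManagerModuleProxy(CGAM));
  CGAM.registerPass(FunctionAnalysisManagerCGSCCProxy(FAM));
  CGAM.registerPass(ModuleAnalysisManagerCGSCCProxy(MAM));
  FAM.registerPass(CGSCCAnalysisManagerFunctionProxy(CGAM));
  FAM.registerPass(ModuleAnalysisManagerFunctionProxy(MAM));

  ModulePassManager MPM(false);
  // e.g. Pipeline = "module(function(instcombine,simplify-cfg))"
  if (!PB.parsePassPipeline(MPM, Pipeline, /*VerifyEachPass=*/true,
                            /*DebugLogging=*/false))
    return false;
  MPM.run(M, &MAM);
  return true;
}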
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPackSignatureElement.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilSignatureElement.h // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Class to pack HLSL signature element. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilPackSignatureElement.h" #include "dxc/DXIL/DxilSigPoint.h" #include "dxc/DXIL/DxilSignature.h" #include "dxc/HLSL/DxilSignatureAllocator.h" #include "dxc/Support/Global.h" using namespace hlsl; using namespace llvm; namespace hlsl { unsigned PackDxilSignature(DxilSignature &sig, DXIL::PackingStrategy packing) { unsigned rowsUsed = 0; bool bUseMinPrecision = sig.UseMinPrecision(); // Transfer to elements derived from DxilSignatureAllocator::PackElement std::vector<DxilPackElement> packElements; for (auto &SE : sig.GetElements()) { if (DxilSignature::ShouldBeAllocated(SE.get()->GetInterpretation())) packElements.emplace_back(SE.get(), bUseMinPrecision); } DXIL::SigPointKind Kind = sig.GetSigPointKind(); if (Kind == DXIL::SigPointKind::GSOut) { // Special case due to support for multiple streams DxilSignatureAllocator alloc[4] = {{32, bUseMinPrecision}, {32, bUseMinPrecision}, {32, bUseMinPrecision}, {32, bUseMinPrecision}}; std::vector<DxilSignatureAllocator::PackElement *> elements[4]; for (auto &SE : packElements) { elements[SE.Get()->GetOutputStream()].push_back(&SE); } for (unsigned i = 0; i < 4; ++i) { if (!elements[i].empty()) { unsigned streamRowsUsed = 0; switch (packing) { case DXIL::PackingStrategy::PrefixStable: streamRowsUsed = alloc[i].PackPrefixStable(elements[i], 0, 32); break; case DXIL::PackingStrategy::Optimized: streamRowsUsed = alloc[i].PackOptimized(elements[i], 0, 32); break; default: DXASSERT(false, "otherwise, invalid packing strategy supplied"); } if (streamRowsUsed > rowsUsed) rowsUsed = streamRowsUsed; } } // rowsUsed isn't really meaningful in this case. return rowsUsed; } const SigPoint *SP = SigPoint::GetSigPoint(Kind); DXIL::PackingKind PK = SP->GetPackingKind(); switch (PK) { case DXIL::PackingKind::None: // no packing. 
break; case DXIL::PackingKind::InputAssembler: // incrementally assign each element that belongs in the signature to the // start of the next free row for (auto &SE : packElements) { SE.SetLocation(rowsUsed, 0); rowsUsed += SE.GetRows(); } break; case DXIL::PackingKind::Vertex: case DXIL::PackingKind::PatchConstant: { DxilSignatureAllocator alloc(32, bUseMinPrecision); std::vector<DxilSignatureAllocator::PackElement *> elements; elements.reserve(packElements.size()); for (auto &SE : packElements) { elements.push_back(&SE); } switch (packing) { case DXIL::PackingStrategy::PrefixStable: rowsUsed = alloc.PackPrefixStable(elements, 0, 32); break; case DXIL::PackingStrategy::Optimized: rowsUsed = alloc.PackOptimized(elements, 0, 32); break; default: DXASSERT(false, "otherwise, invalid packing strategy supplied"); } } break; case DXIL::PackingKind::Target: // for SV_Target, assign rows according to semantic index, the rest are // unassigned (-1) Note: Overlapping semantic indices should be checked // elsewhere for (auto &SE : packElements) { if (SE.GetKind() != DXIL::SemanticKind::Target) continue; unsigned row = SE.Get()->GetSemanticStartIndex(); SE.SetLocation(row, 0); DXASSERT( SE.GetRows() == 1, "otherwise, SV_Target output not broken into separate rows earlier"); row += SE.GetRows(); if (rowsUsed < row) rowsUsed = row; } break; case DXIL::PackingKind::Invalid: default: DXASSERT(false, "unexpected PackingKind."); } return rowsUsed; } } // namespace hlsl #include "dxc/HLSL/DxilSignatureAllocator.inl" #include <algorithm>
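//
// Illustrative call site for the packing entry point above: pack a signature
// with the prefix-stable strategy and check the result against the 32-row
// budget the allocators above are constructed with. The wrapper function is a
// sketch for exposition, not part of this file.
static bool PackSignaturePrefixStable(hlsl::DxilSignature &Sig) {
  unsigned RowsUsed = hlsl::PackDxilSignature(
      Sig, hlsl::DXIL::PackingStrategy::PrefixStable);
  // For GS output signatures the return value covers the widest stream and,
  // as noted above, is not otherwise meaningful.
  return RowsUsed <= 32;
}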
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilNoops.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilNoops.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Passes to insert dx.noops() and replace them with llvm.donothing() // // // /////////////////////////////////////////////////////////////////////////////// // // Here is how dx.preserve and dx.noop work. // // For example, the following HLSL code: // // float foo(float y) { // float x = 10; // x = 20; // x += y; // return x; // } // // float main() : SV_Target { // float ret = foo(10); // return ret; // } // // Ordinarily, it gets lowered as: // // dx.op.storeOutput(3.0) // // Intermediate steps at "x = 20;", "x += y;", "return x", and // even the call to "foo()" are lost. // // But with with Preserve and Noop: // // void call dx.noop() // float ret = foo(10); // %y = dx.preserve(10.0, 10.0) // argument: y=10 // %x0 = dx.preserve(10.0, 10.0) // float x = 10; // %x1 = dx.preserve(20.0, %x0) // x = 20; // %x2 = fadd %x1, %y // x += y; // void call dx.noop() // return x // %ret = dx.preserve(%x2, %x2) // ret = returned from foo() // dx.op.storeOutput(%ret) // // All the intermediate transformations are visible and could be // made inspectable in the debugger. // // The reason why dx.preserve takes 2 arguments is so that the previous // value of a variable does not get cleaned up by DCE. For example: // // float x = ...; // do_some_stuff_with(x); // do_some_other_stuff(); // At this point, x's last values // // are dead and register allocators // // are free to reuse its location during // // call this code. // // So until x is assigned a new value below // // x could become unavailable. // // // // The second parameter in dx.preserve // // keeps x's previous value alive. // // x = ...; // Assign something else // // // When emitting proper DXIL, dx.noop and dx.preserve are lowered to // ordinary LLVM instructions that do not affect the semantic of the // shader, but can be used by a debugger or backend generator if they // know what to look for. // // We generate two special internal constant global vars: // // @dx.preserve.value = internal constant i1 false // @dx.nothing = internal constant i32 0 // // "call dx.noop()" is lowered to "load @dx.nothing" // // "... = call dx.preserve(%cur_val, %last_val)" is lowered to: // // %p = load @dx.preserve.value // ... = select i1 %p, %last_val, %cur_val // // Since %p is guaranteed to be false, the select is guaranteed // to return %cur_val. 
// #include "dxc/HLSL/DxilNoops.h" #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilMetadataHelper.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/raw_os_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include <unordered_set> using namespace llvm; static Function *GetOrCreateNoopF(Module &M) { LLVMContext &Ctx = M.getContext(); FunctionType *FT = FunctionType::get(Type::getVoidTy(Ctx), false); Function *NoopF = cast<Function>(M.getOrInsertFunction(hlsl::kNoopName, FT)); NoopF->addFnAttr(Attribute::AttrKind::Convergent); return NoopF; } static Constant *GetConstGep(Constant *Ptr, unsigned Idx0, unsigned Idx1) { Type *i32Ty = Type::getInt32Ty(Ptr->getContext()); Constant *Indices[] = {ConstantInt::get(i32Ty, Idx0), ConstantInt::get(i32Ty, Idx1)}; return ConstantExpr::getGetElementPtr(nullptr, Ptr, Indices); } struct Store_Info { Instruction *StoreOrMC = nullptr; Value *Source = nullptr; // Alloca, GV, or Argument bool AllowLoads = false; }; static void FindAllStores(Value *Ptr, std::vector<Store_Info> *Stores, std::vector<Value *> &WorklistStorage, std::unordered_set<Value *> &SeenStorage) { assert(isa<Argument>(Ptr) || isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)); WorklistStorage.clear(); WorklistStorage.push_back(Ptr); // Don't clear Seen Storage because two pointers can be involved with the same // memcpy. Clearing it can get the memcpy added twice. unsigned StartIdx = Stores->size(); bool AllowLoad = false; while (WorklistStorage.size()) { Value *V = WorklistStorage.back(); WorklistStorage.pop_back(); SeenStorage.insert(V); if (isa<BitCastOperator>(V) || isa<GEPOperator>(V) || isa<GlobalVariable>(V) || isa<AllocaInst>(V) || isa<Argument>(V)) { for (User *U : V->users()) { MemCpyInst *MC = nullptr; // Allow load if MC reads from pointer if ((MC = dyn_cast<MemCpyInst>(U)) && MC->getSource() == V) { AllowLoad = true; } else if (isa<LoadInst>(U)) { AllowLoad = true; } // Add to worklist if we haven't seen it before. 
else { if (!SeenStorage.count(U)) WorklistStorage.push_back(U); } } } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) { Store_Info Info; Info.StoreOrMC = Store; Info.Source = Ptr; Stores->push_back(Info); } else if (MemCpyInst *MC = dyn_cast<MemCpyInst>(V)) { Store_Info Info; Info.StoreOrMC = MC; Info.Source = Ptr; Stores->push_back(Info); } } if (isa<GlobalVariable>(Ptr)) { AllowLoad = true; } if (AllowLoad) { Store_Info *ptr = Stores->data(); for (unsigned i = StartIdx; i < Stores->size(); i++) ptr[i].AllowLoads = true; } } static User *GetUniqueUser(Value *V) { if (V->user_begin() != V->user_end()) { if (std::next(V->user_begin()) == V->user_end()) return *V->user_begin(); } return nullptr; } static Value *GetOrCreatePreserveCond(Function *F) { assert(!F->isDeclaration()); Module *M = F->getParent(); GlobalVariable *GV = M->getGlobalVariable(hlsl::kPreserveName, true); if (!GV) { Type *i32Ty = Type::getInt32Ty(M->getContext()); Type *i32ArrayTy = ArrayType::get(i32Ty, 1); unsigned int Values[1] = {0}; Constant *InitialValue = llvm::ConstantDataArray::get(M->getContext(), Values); GV = new GlobalVariable(*M, i32ArrayTy, true, llvm::GlobalValue::InternalLinkage, InitialValue, hlsl::kPreserveName); } for (User *U : GV->users()) { GEPOperator *Gep = cast<GEPOperator>(U); for (User *GepU : Gep->users()) { LoadInst *LI = cast<LoadInst>(GepU); if (LI->getParent()->getParent() == F) { return GetUniqueUser(LI); } } } BasicBlock *BB = &F->getEntryBlock(); Instruction *InsertPt = &BB->front(); while (isa<AllocaInst>(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt)) InsertPt = InsertPt->getNextNode(); IRBuilder<> B(InsertPt); Constant *Gep = GetConstGep(GV, 0, 0); LoadInst *Load = B.CreateLoad(Gep); return B.CreateTrunc(Load, B.getInt1Ty()); } bool hlsl::IsNop(llvm::Instruction *I) { CallInst *CI = dyn_cast<CallInst>(I); if (!CI) return false; Function *F = CI->getCalledFunction(); return F && F->getName() == hlsl::kNoopName; } static bool IsPreserveLoad(llvm::Instruction *I) { LoadInst *Load = dyn_cast<LoadInst>(I); if (!Load) return false; GEPOperator *GEP = dyn_cast<GEPOperator>(Load->getPointerOperand()); if (!GEP) return false; GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand()); return GV && GV->getLinkage() == GlobalVariable::LinkageTypes::InternalLinkage && GV->getName() == hlsl::kPreserveName; } static bool IsPreserveTrunc(llvm::Instruction *I) { TruncInst *Trunc = dyn_cast<TruncInst>(I); if (!Trunc) return false; Instruction *Load = dyn_cast<Instruction>(Trunc->getOperand(0)); if (!Load) return false; return IsPreserveLoad(Load); } bool hlsl::IsPreserve(llvm::Instruction *I) { SelectInst *S = dyn_cast<SelectInst>(I); if (!S) return false; Instruction *Trunc = dyn_cast<Instruction>(S->getCondition()); if (!Trunc) return false; return IsPreserveTrunc(Trunc); } bool hlsl::IsPreserveRelatedValue(llvm::Instruction *I) { return IsPreserveLoad(I) || IsPreserveTrunc(I) || hlsl::IsPreserve(I); } static Function *GetOrCreatePreserveF(Module *M, Type *Ty) { std::string str = hlsl::kPreservePrefix; raw_string_ostream os(str); Ty->print(os); os.flush(); FunctionType *FT = FunctionType::get(Ty, {Ty, Ty}, false); Function *PreserveF = cast<Function>(M->getOrInsertFunction(str, FT)); PreserveF->addFnAttr(Attribute::AttrKind::ReadNone); PreserveF->addFnAttr(Attribute::AttrKind::NoUnwind); return PreserveF; } static Instruction *CreatePreserve(Value *V, Value *LastV, Instruction *InsertPt) { assert(V->getType() == LastV->getType()); Type *Ty = V->getType(); Function *PreserveF = 
GetOrCreatePreserveF(InsertPt->getModule(), Ty); return CallInst::Create(PreserveF, ArrayRef<Value *>{V, LastV}, "", InsertPt); } static void LowerPreserveToSelect(CallInst *CI) { Value *V = CI->getArgOperand(0); Value *LastV = CI->getArgOperand(1); if (LastV == V) LastV = UndefValue::get(V->getType()); Value *Cond = GetOrCreatePreserveCond(CI->getParent()->getParent()); SelectInst *Select = SelectInst::Create(Cond, LastV, V, "", CI); Select->setDebugLoc(CI->getDebugLoc()); CI->replaceAllUsesWith(Select); CI->eraseFromParent(); } static void InsertNoopAt(Instruction *I) { Module &M = *I->getModule(); Function *NoopF = GetOrCreateNoopF(M); CallInst *Noop = CallInst::Create(NoopF, {}, I); Noop->setDebugLoc(I->getDebugLoc()); } static void InsertPreserve(bool AllowLoads, StoreInst *Store) { Value *V = Store->getValueOperand(); IRBuilder<> B(Store); Value *Last_Value = nullptr; // If there's never any loads for this memory location, // don't generate a load. if (AllowLoads) { Last_Value = B.CreateLoad(Store->getPointerOperand()); } else { Last_Value = UndefValue::get(V->getType()); } Instruction *Preserve = CreatePreserve(V, Last_Value, Store); Preserve->setDebugLoc(Store->getDebugLoc()); Store->replaceUsesOfWith(V, Preserve); } //========================================================== // Insertion pass // // This pass inserts dx.noop and dx.preserve where we want // to preserve line mapping or perserve some intermediate // values. struct DxilInsertPreserves : public ModulePass { static char ID; DxilInsertPreserves(bool AllowPreserves = false) : ModulePass(ID), AllowPreserves(AllowPreserves) { initializeDxilInsertPreservesPass(*PassRegistry::getPassRegistry()); } bool AllowPreserves = false; // Function overrides that resolve options when used for DxOpt void applyOptions(PassOptions O) override { GetPassOptionBool(O, "AllowPreserves", &AllowPreserves, false); } void dumpConfig(raw_ostream &OS) override { ModulePass::dumpConfig(OS); OS << ",AllowPreserves=" << AllowPreserves; } bool runOnModule(Module &M) override { std::vector<Store_Info> Stores; std::vector<Value *> WorklistStorage; std::unordered_set<Value *> SeenStorage; for (GlobalVariable &GV : M.globals()) { if (GV.getLinkage() != GlobalValue::LinkageTypes::InternalLinkage || GV.getType()->getPointerAddressSpace() == hlsl::DXIL::kTGSMAddrSpace) { continue; } for (User *U : GV.users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { InsertNoopAt(LI); } } FindAllStores(&GV, &Stores, WorklistStorage, SeenStorage); } bool Changed = false; for (Function &F : M) { if (F.isDeclaration()) continue; // Collect Stores on Allocas in function BasicBlock *Entry = &*F.begin(); for (Instruction &I : *Entry) { AllocaInst *AI = dyn_cast<AllocaInst>(&I); if (!AI) continue; // Skip temp allocas if (!AI->getMetadata(hlsl::DxilMDHelper::kDxilTempAllocaMDName)) FindAllStores(AI, &Stores, WorklistStorage, SeenStorage); } // Collect Stores on pointer Arguments in function for (Argument &Arg : F.args()) { if (Arg.getType()->isPointerTy()) FindAllStores(&Arg, &Stores, WorklistStorage, SeenStorage); } // For every real function call, insert a nop // so we can put a breakpoint there. 
for (User *U : F.users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { InsertNoopAt(CI); } } // Insert nops for void return statements for (BasicBlock &BB : F) { ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()); if (Ret) InsertNoopAt(Ret); } } // Insert preserves or noops for these stores for (Store_Info &Info : Stores) { if (StoreInst *Store = dyn_cast<StoreInst>(Info.StoreOrMC)) { Value *V = Store->getValueOperand(); if (this->AllowPreserves && V && !V->getType()->isAggregateType() && !V->getType()->isPointerTy()) { InsertPreserve(Info.AllowLoads, Store); Changed = true; } else { InsertNoopAt(Store); Changed = true; } } else if (MemCpyInst *MC = cast<MemCpyInst>(Info.StoreOrMC)) { // TODO: Do something to preserve pointer's previous value. InsertNoopAt(MC); Changed = true; } } return Changed; } StringRef getPassName() const override { return "Dxil Insert Preserves"; } }; char DxilInsertPreserves::ID; Pass *llvm::createDxilInsertPreservesPass(bool AllowPreserves) { return new DxilInsertPreserves(AllowPreserves); } INITIALIZE_PASS(DxilInsertPreserves, "dxil-insert-preserves", "Dxil Insert Preserves", false, false) //========================================================== // Lower dx.preserve to select // // This pass replaces all dx.preserve calls to select // namespace { class DxilPreserveToSelect : public ModulePass { public: static char ID; SmallDenseMap<Type *, Function *> PreserveFunctions; DxilPreserveToSelect() : ModulePass(ID) { initializeDxilPreserveToSelectPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override { bool Changed = false; for (auto fit = M.getFunctionList().begin(), end = M.getFunctionList().end(); fit != end;) { Function *F = &*(fit++); if (!F->isDeclaration()) continue; if (F->getName().startswith(hlsl::kPreservePrefix)) { for (auto uit = F->user_begin(), end = F->user_end(); uit != end;) { User *U = *(uit++); CallInst *CI = cast<CallInst>(U); LowerPreserveToSelect(CI); } F->eraseFromParent(); Changed = true; } } return Changed; } StringRef getPassName() const override { return "Dxil Lower Preserves to Selects"; } }; char DxilPreserveToSelect::ID; } // namespace Pass *llvm::createDxilPreserveToSelectPass() { return new DxilPreserveToSelect(); } INITIALIZE_PASS(DxilPreserveToSelect, "dxil-preserves-to-select", "Dxil Preserves To Select", false, false) //========================================================== // output Argument debug info rewrite // namespace { class DxilRewriteOutputArgDebugInfo : public ModulePass { public: static char ID; DxilRewriteOutputArgDebugInfo() : ModulePass(ID) { initializeDxilRewriteOutputArgDebugInfoPass( *PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override { DITypeIdentifierMap EmptyMap; DIBuilder DIB(M); bool Changed = false; for (Function &F : M) { for (Argument &Arg : F.args()) { if (!Arg.getType()->isPointerTy()) continue; DbgDeclareInst *Declare = llvm::FindAllocaDbgDeclare(&Arg); if (!Declare) continue; DILocalVariable *Var = Declare->getVariable(); DIType *Ty = Var->getType().resolve(EmptyMap); DIExpression *Expr = Declare->getExpression(); if (Expr->getNumElements() == 1 && Expr->getElement(0) == dwarf::DW_OP_deref) { while (Ty && (Ty->getTag() == dwarf::DW_TAG_reference_type || Ty->getTag() == dwarf::DW_TAG_restrict_type)) { Ty = cast<DIDerivedType>(Ty)->getBaseType().resolve(EmptyMap); } if (Ty) { DILocalVariable *NewVar = DIB.createLocalVariable( dwarf::DW_TAG_arg_variable, Var->getScope(), Var->getName(), Var->getFile(), Var->getLine(), Ty, false, 0, 
Var->getArg()); DIExpression *EmptyExpr = DIExpression::get(M.getContext(), {}); DIB.insertDeclare(&Arg, NewVar, EmptyExpr, Declare->getDebugLoc(), Declare); Declare->eraseFromParent(); Changed = true; } } } } return Changed; } StringRef getPassName() const override { return "Dxil Rewrite Output Arg Debug Info"; } }; char DxilRewriteOutputArgDebugInfo::ID; } // namespace Pass *llvm::createDxilRewriteOutputArgDebugInfoPass() { return new DxilRewriteOutputArgDebugInfo(); } INITIALIZE_PASS(DxilRewriteOutputArgDebugInfo, "dxil-rewrite-output-arg-debug-info", "Dxil Rewrite Output Arg Debug Info", false, false) //========================================================== // Reader pass // namespace { class DxilReinsertNops : public ModulePass { public: static char ID; DxilReinsertNops() : ModulePass(ID) { initializeDxilReinsertNopsPass(*PassRegistry::getPassRegistry()); } // In various linking scenarios, the dx.nothing.a variable might be prefixed // and/or suffixed with something: // // <library_name>.dx.nothing.a.<another_thing> // // This routine looks for the "dx.nothing.a" string inside of it, and as long // as it's used in the expected way: // // %0 = load i32, i32* getelementptr inbounds ([1 x i32], [1 x i32]* // @dx.nothing.a, i32 0, i32 0) // // ...it is deemed a valid nop. // static bool IsLegalNothingVarName(StringRef Name) { // There should be a single instance of the name in this GV. if (1 != Name.count(hlsl::kNothingName)) return false; size_t Loc = Name.find(hlsl::kNothingName); StringRef Prefix = Name.substr(0, Loc); StringRef Suffix = Name.substr(Loc + Name.size()); // There should be either no prefix or a prefix that ends with . if (!Prefix.empty() && !Prefix.endswith(".")) { return false; } // There should be either no suffix or a prefix that begins with with . 
if (!Suffix.empty() && !Suffix.startswith(".")) { return false; } return true; } bool runOnModule(Module &M) override { bool Changed = false; for (GlobalVariable &GV : M.globals()) { if (!IsLegalNothingVarName(GV.getName())) continue; const bool IsValidType = GV.getValueType()->isArrayTy() && GV.getValueType()->getArrayElementType() == Type::getInt32Ty(M.getContext()) && GV.getValueType()->getArrayNumElements() == 1; if (!IsValidType) return false; for (User *GVU : GV.users()) { ConstantExpr *CE = dyn_cast<ConstantExpr>(GVU); if (!CE || CE->getOpcode() != Instruction::GetElementPtr) continue; for (auto it = CE->user_begin(), end = CE->user_end(); it != end;) { User *U = *(it++); LoadInst *LI = dyn_cast<LoadInst>(U); if (!LI) continue; InsertNoopAt(LI); LI->eraseFromParent(); Changed = true; } } } return Changed; } StringRef getPassName() const override { return "Dxil Reinsert Nops"; } }; char DxilReinsertNops::ID; } // namespace Pass *llvm::createDxilReinsertNopsPass() { return new DxilReinsertNops(); } INITIALIZE_PASS(DxilReinsertNops, "dxil-reinsert-nops", "Dxil Reinsert Nops", false, false) //========================================================== // Finalize pass // namespace { class DxilFinalizePreserves : public ModulePass { public: static char ID; GlobalVariable *NothingGV = nullptr; DxilFinalizePreserves() : ModulePass(ID) { initializeDxilFinalizePreservesPass(*PassRegistry::getPassRegistry()); } Instruction *GetFinalNoopInst(Module &M, Instruction *InsertBefore) { Type *i32Ty = Type::getInt32Ty(M.getContext()); if (!NothingGV) { NothingGV = M.getGlobalVariable(hlsl::kNothingName); if (!NothingGV) { Type *i32ArrayTy = ArrayType::get(i32Ty, 1); unsigned int Values[1] = {0}; Constant *InitialValue = llvm::ConstantDataArray::get(M.getContext(), Values); NothingGV = new GlobalVariable(M, i32ArrayTy, true, llvm::GlobalValue::InternalLinkage, InitialValue, hlsl::kNothingName); } } Constant *Gep = GetConstGep(NothingGV, 0, 0); return new llvm::LoadInst(Gep, nullptr, InsertBefore); } bool LowerPreserves(Module &M); bool LowerNoops(Module &M); bool runOnModule(Module &M) override; StringRef getPassName() const override { return "Dxil Finalize Preserves"; } }; char DxilFinalizePreserves::ID; } // namespace // Fix undefs in the dx.preserve -> selects bool DxilFinalizePreserves::LowerPreserves(Module &M) { bool Changed = false; GlobalVariable *GV = M.getGlobalVariable(hlsl::kPreserveName, true); if (GV) { for (User *U : GV->users()) { GEPOperator *Gep = cast<GEPOperator>(U); for (User *GepU : Gep->users()) { LoadInst *LI = cast<LoadInst>(GepU); assert(LI->user_begin() != LI->user_end() && std::next(LI->user_begin()) == LI->user_end()); Instruction *I = cast<Instruction>(*LI->user_begin()); for (User *UU : I->users()) { SelectInst *P = cast<SelectInst>(UU); Value *PrevV = P->getTrueValue(); Value *CurV = P->getFalseValue(); if (isa<UndefValue>(PrevV) || isa<Constant>(PrevV)) { P->setOperand(1, CurV); Changed = true; } } } } } return Changed; } // Replace all @dx.noop's with load @dx.nothing.value bool DxilFinalizePreserves::LowerNoops(Module &M) { bool Changed = false; Function *NoopF = nullptr; for (Function &F : M) { if (!F.isDeclaration()) continue; if (F.getName() == hlsl::kNoopName) { NoopF = &F; } } if (NoopF) { for (auto It = NoopF->user_begin(), E = NoopF->user_end(); It != E;) { User *U = *(It++); CallInst *CI = cast<CallInst>(U); Instruction *Nop = GetFinalNoopInst(M, CI); Nop->setDebugLoc(CI->getDebugLoc()); CI->eraseFromParent(); Changed = true; } assert(NoopF->user_empty() && 
"dx.noop calls must be all removed now"); NoopF->eraseFromParent(); } return Changed; } // Replace all preserves and nops bool DxilFinalizePreserves::runOnModule(Module &M) { bool Changed = false; Changed |= LowerPreserves(M); Changed |= LowerNoops(M); return Changed; } Pass *llvm::createDxilFinalizePreservesPass() { return new DxilFinalizePreserves(); } INITIALIZE_PASS(DxilFinalizePreserves, "dxil-finalize-preserves", "Dxil Finalize Preserves", false, false)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLSignatureLower.h
/////////////////////////////////////////////////////////////////////////////// // // // HLSignatureLower.h // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Lower signatures of entry function to DXIL LoadInput/StoreOutput. // // // /////////////////////////////////////////////////////////////////////////////// #pragma once #include "dxc/DXIL/DxilConstants.h" #include <unordered_map> #include <unordered_set> namespace llvm { class Value; class Argument; class Function; class StringRef; } // namespace llvm namespace hlsl { class HLModule; struct DxilEntrySignature; class DxilFunctionAnnotation; class ShaderModel; struct DxilFunctionProps; class DxilSignatureElement; class DxilParameterAnnotation; class SigPoint; class HLSignatureLower { public: HLSignatureLower(llvm::Function *F, HLModule &M, DxilEntrySignature &Sig) : Entry(F), HLM(M), EntrySig(Sig) {} void Run(); private: // Create signatures. void ProcessArgument(llvm::Function *func, DxilFunctionAnnotation *EntryAnnotation, llvm::Argument &arg, DxilFunctionProps &props, const ShaderModel *pSM, bool isPatchConstantFunction, bool forceOut, bool &hasClipPlane); void CreateDxilSignatures(); // Allocate DXIL input/output. void AllocateDxilInputOutputs(); // Generate DXIL input load, output store void GenerateDxilInputs(); void GenerateDxilOutputs(); void GenerateDxilPrimOutputs(); void GenerateDxilInputsOutputs(DXIL::SignatureKind SK); void GenerateDxilComputeAndNodeCommonInputs(); void GenerateDxilPatchConstantLdSt(); void GenerateDxilPatchConstantFunctionInputs(); void GenerateClipPlanesForVS(llvm::Value *outPosition); bool HasClipPlanes(); // Generate DXIL stream output operation. void GenerateStreamOutputOperation(llvm::Value *streamVal, unsigned streamID); // Generate DXIL stream output operations. void GenerateStreamOutputOperations(); // Generate DXIL EmitIndices operation. void GenerateEmitIndicesOperation(llvm::Value *indicesOutput); // Generate DXIL EmitIndices operations. void GenerateEmitIndicesOperations(); // Generate DXIL GetMeshPayload operation. void GenerateGetMeshPayloadOperation(); private: llvm::Function *Entry; HLModule &HLM; DxilEntrySignature &EntrySig; // For validation std::unordered_map<unsigned, std::unordered_set<unsigned>> m_InputSemanticsUsed, m_OutputSemanticsUsed[4], m_PatchConstantSemanticsUsed, m_OtherSemanticsUsed; // SignatureElement to Value map for GenerateDxilInputsOutputs. std::unordered_map<DxilSignatureElement *, llvm::Value *> m_sigValueMap; // Patch constant function inputs to signature element map for // GenerateDxilPatchConstantFunctionInputs. std::unordered_map<unsigned, DxilSignatureElement *> m_patchConstantInputsSigMap; // Set to save inout arguments for GenerateDxilInputsOutputs. std::unordered_set<llvm::Value *> m_inoutArgSet; // SignatureElement which has precise attribute for GenerateDxilInputsOutputs. std::unordered_set<DxilSignatureElement *> m_preciseSigSet; }; } // namespace hlsl
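//
// Illustrative use of the public surface declared above: construct the
// lowering helper for an entry function and run it. The free function and its
// parameter names are placeholders for exposition, not part of this header.
inline void LowerEntrySignature(llvm::Function *EntryFn, hlsl::HLModule &HLM,
                                hlsl::DxilEntrySignature &EntrySig) {
  hlsl::HLSignatureLower Lower(EntryFn, HLM, EntrySig);
  Lower.Run();
}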
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilSignatureValidation.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilSignatureElement.h // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Validate HLSL signature element packing. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilSigPoint.h" #include "dxc/DXIL/DxilSignature.h" #include "dxc/HLSL/DxilSignatureAllocator.h" #include "dxc/Support/Global.h" using namespace hlsl; using namespace llvm; #include <assert.h> // Needed for DxilPipelineStateValidation.h #include <functional> #include "dxc/DxilContainer/DxilPipelineStateValidation.h" #include "dxc/HLSL/ViewIDPipelineValidation.inl"
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/ControlDependence.cpp
/////////////////////////////////////////////////////////////////////////////// // // // ControlDependence.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Control dependence is computed using algorithm in Figure 7.9 from [AK]. // // // // References // // [AK] Optimizing Compilers for Modern Architectures by Allen and Kennedy. // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/ControlDependence.h" #include "dxc/Support/Global.h" #include "llvm/Support/Debug.h" using namespace llvm; using namespace hlsl; const BasicBlockSet &ControlDependence::GetCDBlocks(BasicBlock *pBB) const { auto it = m_ControlDependence.find(pBB); if (it != m_ControlDependence.end()) return it->second; else return m_EmptyBBSet; } void ControlDependence::print(raw_ostream &OS) { OS << "Control dependence for function '" << m_pFunc->getName() << "'\n"; for (auto &it : m_ControlDependence) { BasicBlock *pBB = it.first; OS << "Block " << pBB->getName() << ": { "; bool bFirst = true; for (BasicBlock *pBB2 : it.second) { if (!bFirst) OS << ", "; OS << pBB2->getName(); bFirst = false; } OS << " }\n"; } OS << "\n"; } void ControlDependence::dump() { print(dbgs()); } void ControlDependence::Compute(Function *F, PostDomRelationType &PostDomRel) { m_pFunc = F; // Compute reverse topological order of PDT. BasicBlockVector RevTopOrder; BasicBlockSet VisitedBBs; for (BasicBlock *pBB : PostDomRel.getRoots()) { ComputeRevTopOrderRec(PostDomRel, pBB, RevTopOrder, VisitedBBs); } DXASSERT_NOMSG(RevTopOrder.size() == VisitedBBs.size()); // Compute control dependence relation. for (size_t iBB = 0; iBB < RevTopOrder.size(); iBB++) { BasicBlock *x = RevTopOrder[iBB]; // For each y = pred(x): if ipostdom(y) != x then add "x is control // dependent on y" for (auto itPred = pred_begin(x), endPred = pred_end(x); itPred != endPred; ++itPred) { BasicBlock *y = *itPred; // predecessor of x BasicBlock *pPredIDomBB = GetIPostDom(PostDomRel, y); if (pPredIDomBB != x) { m_ControlDependence[x].insert(y); } } // For all z such that ipostdom(z) = x for (DomTreeNode *child : PostDomRel.getNode(x)->getChildren()) { BasicBlock *z = child->getBlock(); auto it = m_ControlDependence.find(z); if (it == m_ControlDependence.end()) continue; // For all y in CDG(z) for (BasicBlock *y : it->second) { // if ipostdom(y) != x then add "x is control dependent on y" BasicBlock *pPredIDomBB = GetIPostDom(PostDomRel, y); if (pPredIDomBB != x) { m_ControlDependence[x].insert(y); } } } } } void ControlDependence::Clear() { m_pFunc = nullptr; m_ControlDependence.clear(); m_EmptyBBSet.clear(); } BasicBlock *ControlDependence::GetIPostDom(PostDomRelationType &PostDomRel, BasicBlock *pBB) { auto *pPDTNode = PostDomRel.getNode(pBB); auto *pIDomNode = pPDTNode->getIDom(); BasicBlock *pIDomBB = pIDomNode != nullptr ? pIDomNode->getBlock() : nullptr; return pIDomBB; } void ControlDependence::ComputeRevTopOrderRec(PostDomRelationType &PostDomRel, BasicBlock *pBB, BasicBlockVector &RevTopOrder, BasicBlockSet &VisitedBBs) { if (VisitedBBs.find(pBB) != VisitedBBs.end()) { return; } VisitedBBs.insert(pBB); SmallVector<BasicBlock *, 8> Descendants; PostDomRel.getDescendants(pBB, Descendants); for (BasicBlock *pDescBB : Descendants) { if (pDescBB != pBB) ComputeRevTopOrderRec(PostDomRel, pDescBB, RevTopOrder, VisitedBBs); } RevTopOrder.emplace_back(pBB); }
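//
// A sketch of driving the computation above for one function, assuming the
// caller has already built the post-dominator relation (PostDomRelationType is
// the typedef declared in ControlDependence.h). For intuition: in a simple
// diamond CFG where "entry" branches to "then" and "else" and both rejoin at
// "merge", the relation computed here makes "then" and "else" control
// dependent on "entry", while "merge" is not, because "merge" post-dominates
// "entry". The helper below is illustrative only and relies on the headers
// this file already includes.
static void DumpControlDependence(Function &F,
                                  PostDomRelationType &PostDomRel) {
  ControlDependence CD;
  CD.Compute(&F, PostDomRel);
  CD.print(dbgs());
  CD.Clear();
}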