repos/DirectXShaderCompiler/lib/HLSL/DxilPatchShaderRecordBindings.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// DxilPatchShaderRecordBindings.cpp                                         //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source    //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Provides a pass used by the RayTracing Fallback Layer to modify           //
// bindings to pull local root signature parameters from a global            //
// "shader table" buffer instead.                                            //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "dxc/DXIL/DxilFunctionProps.h"
#include "dxc/DXIL/DxilModule.h"
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilSignatureElement.h"
#include "dxc/HLSL/DxilFallbackLayerPass.h"
#include "dxc/HLSL/DxilGenerationPass.h"
#include "dxc/Support/Global.h"

#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DXIL/DxilInstructions.h"
#include "dxc/DXIL/DxilTypeSystem.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/HLSL/DxilSpanAllocator.h"
#include "dxc/Support/Unicode.h"
#include "llvm/Transforms/Utils/Cloning.h"

#include "llvm/ADT/BitVector.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <array>
#include <functional>
#include <memory>
#include <unordered_map>
#include <unordered_set>

struct D3D12_VERSIONED_ROOT_SIGNATURE_DESC;
#include "DxilPatchShaderRecordBindingsShared.h"

using namespace llvm;
using namespace hlsl;

bool operator==(const ViewKey &a, const ViewKey &b) {
  return memcmp(&a, &b, sizeof(a)) == 0;
}

const size_t SizeofD3D12GpuVA = sizeof(uint64_t);
const size_t SizeofD3D12GpuDescriptorHandle = sizeof(uint64_t);

Function *CloneFunction(Function *Orig, const llvm::Twine &Name,
                        llvm::Module *llvmModule) {
  Function *F = Function::Create(Orig->getFunctionType(),
                                 GlobalValue::LinkageTypes::ExternalLinkage,
                                 Name, llvmModule);

  SmallVector<ReturnInst *, 2> Returns;
  ValueToValueMapTy vmap;
  // Map params.
  auto entryParamIt = F->arg_begin();
  for (Argument &param : Orig->args()) {
    vmap[&param] = (entryParamIt++);
  }

  DxilModule &DM = llvmModule->GetOrCreateDxilModule();
  llvm::CloneFunctionInto(F, Orig, vmap, /*ModuleLevelChanges*/ false, Returns);
  DM.GetTypeSystem().CopyFunctionAnnotation(F, Orig, DM.GetTypeSystem());
  if (DM.HasDxilFunctionProps(F)) {
    DM.CloneDxilEntryProps(Orig, F);
  }
  return F;
}

struct ShaderRecordEntry {
  DxilRootParameterType ParameterType;
  unsigned int RecordOffsetInBytes;
  unsigned int OffsetInDescriptors; // Only valid for descriptor tables

  static ShaderRecordEntry InvalidEntry() {
    return {(DxilRootParameterType)-1, (unsigned int)-1, 0};
  }
  bool IsInvalid() { return (unsigned int)ParameterType == (unsigned int)-1; }
};

struct D3D12_VERSIONED_ROOT_SIGNATURE_DESC;

class DxilPatchShaderRecordBindings : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit DxilPatchShaderRecordBindings() : ModulePass(ID) {}
  StringRef getPassName() const override {
    return "DXIL Patch Shader Record Binding";
  }
  void applyOptions(PassOptions O) override;
  bool runOnModule(Module &M) override;

private:
  void ValidateParameters();
  void AddInputBinding(Module &M);
  void PatchShaderBindings(Module &M);
  void InitializeViewTable();

  unsigned int AddSRVRawBuffer(Module &M, unsigned int registerIndex,
                               unsigned int registerSpace,
                               const std::string &bufferName);
  unsigned int AddHandle(Module &M, unsigned int baseRegisterIndex,
                         unsigned int rangeSize, unsigned int registerSpace,
                         DXIL::ResourceClass resClass,
                         DXIL::ResourceKind resKind,
                         const std::string &bufferName,
                         llvm::Type *type = nullptr,
                         unsigned int constantBufferSize = 0);
  unsigned int AddAliasedHandle(Module &M, unsigned int baseRegisterIndex,
                                unsigned int registerSpace,
                                DXIL::ResourceClass resClass,
                                DXIL::ResourceKind resKind,
                                const std::string &bufferName,
                                llvm::Type *type);
  unsigned int AddCBufferAliasedHandle(Module &M,
                                       unsigned int baseRegisterIndex,
                                       unsigned int registerSpace,
                                       const std::string &bufferName);
  llvm::Value *CreateOffsetToShaderRecord(Module &M, IRBuilder<> &Builder,
                                          unsigned int RecordOffsetInBytes,
                                          llvm::Value *CbufferOffsetInBytes);
  llvm::Value *
  CreateShaderRecordBufferLoad(Module &M, IRBuilder<> &Builder,
                               llvm::Value *ShaderRecordOffsetInBytes,
                               llvm::Type *type);
  llvm::Value *CreateCBufferLoadOffsetInBytes(Module &M, IRBuilder<> &Builder,
                                              llvm::Instruction *instruction);
  llvm::Value *CreateCBufferLoadLegacy(Module &M, IRBuilder<> &Builder,
                                       llvm::Value *ResourceHandle,
                                       unsigned int RowToLoad = 0);
  llvm::Value *LoadShaderRecordData(Module &M, IRBuilder<> &Builder,
                                    llvm::Value *offsetToShaderRecord,
                                    unsigned int dataOffsetInShaderRecord);
  void PatchCreateHandleToUseDescriptorIndex(
      Module &M, IRBuilder<> &Builder, DXIL::ResourceKind &resourceKind,
      DXIL::ResourceClass &resourceClass, llvm::Type *resourceType,
      llvm::Value *descriptorIndex,
      DxilInst_CreateHandleForLib &createHandleInstr);

  bool GetHandleInfo(Module &M,
                     DxilInst_CreateHandleForLib &createHandleStructForLib,
                     unsigned int &shaderRegister, unsigned int &registerSpace,
                     DXIL::ResourceKind &kind, DXIL::ResourceClass &resClass,
                     llvm::Type *&resType);
  llvm::Value *GetAliasedDescriptorHeapHandle(Module &M, llvm::Type *,
                                              DXIL::ResourceClass resClass,
                                              DXIL::ResourceKind resKind);
  unsigned int GetConstantBufferOffsetToShaderRecord();
  bool IsCBufferLoad(llvm::Instruction *instruction);

  // Unlike the LLVM version of this function, this does not require the
  // InstructionToReplace and the ValueToReplaceWith to be the same instruction
  // type
  static void ReplaceUsesOfWith(llvm::Instruction *InstructionToReplace,
                                llvm::Value *ValueToReplaceWith);

  static ShaderRecordEntry FindRootSignatureDescriptor(
      const DxilVersionedRootSignatureDesc &rootSignatureDescriptor,
      unsigned int ShaderRecordIdentifierSizeInBytes,
      DXIL::ResourceClass resourceClass, unsigned int baseRegisterIndex,
      unsigned int registerSpace);

  // TODO: I would like to see these prefixed with m_
  llvm::Value *ShaderTableHandle = nullptr;
  llvm::Value *DispatchRaysConstantsHandle = nullptr;
  llvm::Value *BaseShaderRecordOffset = nullptr;

  static const unsigned int NumViewTypes = 4;
  struct ViewKeyHasher {
  public:
    std::size_t operator()(const ViewKey &x) const {
      return std::hash<unsigned int>()((unsigned int)x.ViewType) ^
             std::hash<unsigned int>()((unsigned int)x.StructuredStride);
    }
  };
  std::unordered_map<ViewKey, llvm::Value *, ViewKeyHasher>
      TypeToAliasedDescriptorHeap[NumViewTypes];

  llvm::Function *EntryPointFunction;
  ShaderInfo *pInputShaderInfo;
  const DxilVersionedRootSignatureDesc *pRootSignatureDesc;
  DXIL::ShaderKind ShaderKind;
};

char DxilPatchShaderRecordBindings::ID = 0;

// TODO: Find the right thing to do on failure
void ThrowFailure() { throw std::exception(); }

// TODO: Stolen from Brandon's code, merge
// Remove ELF mangling
static inline std::string GetUnmangledName(StringRef name) {
  if (!name.startswith("\x1?"))
    return name;
  size_t pos = name.find("@@");
  if (pos == name.npos)
    return name;
  return name.substr(2, pos - 2);
}

static Function *getFunctionFromName(Module &M,
                                     const std::wstring &exportName) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    std::wstring functionName = Unicode::UTF8ToWideStringOrThrow(
        GetUnmangledName(F->getName()).c_str());
    if (exportName == functionName) {
      return F;
    }
  }
  return nullptr;
}

ModulePass *llvm::createDxilPatchShaderRecordBindingsPass() {
  return new DxilPatchShaderRecordBindings();
}

INITIALIZE_PASS(DxilPatchShaderRecordBindings,
                "hlsl-dxil-patch-shader-record-bindings",
                "Patch shader record bindings to instead pull from the "
                "fallback provided bindings",
                false, false)

void DxilPatchShaderRecordBindings::applyOptions(PassOptions O) {
  for (const auto &option : O) {
    if (0 == option.first.compare("root-signature")) {
      unsigned int cHexRadix = 16;
      pInputShaderInfo =
          (ShaderInfo *)strtoull(option.second.data(), nullptr, cHexRadix);
      pRootSignatureDesc = (const DxilVersionedRootSignatureDesc *)
                               pInputShaderInfo->pRootSignatureDesc;
    }
  }
}

void AddAnnoationsIfNeeded(DxilModule &DM, llvm::StructType *StructTy,
                           const std::string &FieldName,
                           unsigned int numFields = 1) {
  auto pAnnotation = DM.GetTypeSystem().GetStructAnnotation(StructTy);
  if (pAnnotation == nullptr) {
    pAnnotation = DM.GetTypeSystem().AddStructAnnotation(StructTy);
    pAnnotation->SetCBufferSize(sizeof(uint32_t) * numFields);
    for (unsigned int i = 0; i < numFields; i++) {
      pAnnotation->GetFieldAnnotation(i).SetCBufferOffset(sizeof(uint32_t) * i);
      pAnnotation->GetFieldAnnotation(i).SetCompType(
          hlsl::DXIL::ComponentType::I32);
      pAnnotation->GetFieldAnnotation(i).SetFieldName(FieldName +
                                                      std::to_string(i));
    }
  }
}

unsigned int DxilPatchShaderRecordBindings::AddHandle(
    Module &M, unsigned int baseRegisterIndex, unsigned int rangeSize,
    unsigned int registerSpace, DXIL::ResourceClass resClass,
    DXIL::ResourceKind resKind, const std::string &bufferName,
    llvm::Type *type, unsigned int constantBufferSize) {
  LLVMContext &Ctx = M.getContext();
  DxilModule &DM = M.GetOrCreateDxilModule();

  // Set up a SRV with byte address buffer
  unsigned int resourceHandle;
  std::unique_ptr<DxilResource> pHandle;
  std::unique_ptr<DxilCBuffer> pCBuf;
  std::unique_ptr<DxilSampler> pSampler;
  DxilResourceBase *pBaseHandle;
  switch (resClass) {
  case DXIL::ResourceClass::SRV:
    resourceHandle = static_cast<unsigned int>(DM.GetSRVs().size());
    pHandle = llvm::make_unique<DxilResource>();
    pHandle->SetRW(false);
    pBaseHandle = pHandle.get();
    break;
  case DXIL::ResourceClass::UAV:
    resourceHandle = static_cast<unsigned int>(DM.GetUAVs().size());
    pHandle = llvm::make_unique<DxilResource>();
    pHandle->SetRW(true);
    pBaseHandle = pHandle.get();
    break;
  case DXIL::ResourceClass::CBuffer:
    resourceHandle = static_cast<unsigned int>(DM.GetCBuffers().size());
    pCBuf = llvm::make_unique<DxilCBuffer>();
    pCBuf->SetSize(constantBufferSize);
    pBaseHandle = pCBuf.get();
    break;
  case DXIL::ResourceClass::Sampler:
    resourceHandle = static_cast<unsigned int>(DM.GetSamplers().size());
    pSampler = llvm::make_unique<DxilSampler>();
    // TODO: Is this okay? What if one of the samplers in the table is a
    // comparison sampler?
    pSampler->SetSamplerKind(DxilSampler::SamplerKind::Default);
    pBaseHandle = pSampler.get();
    break;
  }

  if (!type) {
    SmallVector<llvm::Type *, 1> Elements{Type::getInt32Ty(Ctx)};
    std::string ByteAddressBufferName = "struct.ByteAddressBuffer";
    type = M.getTypeByName(ByteAddressBufferName);
    if (!type) {
      StructType *StructTy;
      type = StructTy = StructType::create(Elements, ByteAddressBufferName);
      AddAnnoationsIfNeeded(DM, StructTy, ByteAddressBufferName);
    }
  }

  GlobalVariable *GV = M.getGlobalVariable(bufferName);
  if (!GV) {
    GV = cast<GlobalVariable>(M.getOrInsertGlobal(bufferName, type));
  }

  pBaseHandle->SetGlobalName(bufferName.c_str());
  pBaseHandle->SetGlobalSymbol(GV);
  pBaseHandle->SetID(resourceHandle);
  pBaseHandle->SetSpaceID(registerSpace);
  pBaseHandle->SetLowerBound(baseRegisterIndex);
  pBaseHandle->SetRangeSize(rangeSize);
  pBaseHandle->SetKind(resKind);

  if (pHandle) {
    pHandle->SetGloballyCoherent(false);
    pHandle->SetHasCounter(false);
    pHandle->SetCompType(CompType::getF32()); // TODO: Need to handle all types
  }

  unsigned int ID;
  switch (resClass) {
  case DXIL::ResourceClass::SRV:
    ID = DM.AddSRV(std::move(pHandle));
    break;
  case DXIL::ResourceClass::UAV:
    ID = DM.AddUAV(std::move(pHandle));
    break;
  case DXIL::ResourceClass::CBuffer:
    ID = DM.AddCBuffer(std::move(pCBuf));
    break;
  case DXIL::ResourceClass::Sampler:
    ID = DM.AddSampler(std::move(pSampler));
    break;
  }
  assert(ID == resourceHandle);
  return ID;
}

unsigned int
DxilPatchShaderRecordBindings::GetConstantBufferOffsetToShaderRecord() {
  switch (ShaderKind) {
  case DXIL::ShaderKind::ClosestHit:
  case DXIL::ShaderKind::AnyHit:
  case DXIL::ShaderKind::Intersection:
    return offsetof(DispatchRaysConstants, HitGroupShaderRecordStride);
  case DXIL::ShaderKind::Miss:
    return offsetof(DispatchRaysConstants, MissShaderRecordStride);
  default:
    ThrowFailure();
    return -1;
  }
}

unsigned int DxilPatchShaderRecordBindings::AddSRVRawBuffer(
    Module &M, unsigned int registerIndex, unsigned int registerSpace,
    const std::string &bufferName) {
  return AddHandle(M, registerIndex, 1, registerSpace,
                   DXIL::ResourceClass::SRV, DXIL::ResourceKind::RawBuffer,
                   bufferName);
}

llvm::Constant *GetArraySymbol(Module &M, const std::string &bufferName) {
  LLVMContext &Ctx = M.getContext();

  SmallVector<llvm::Type *, 1> Elements{Type::getInt32Ty(Ctx)};
  llvm::StructType *StructTy = llvm::StructType::create(Elements, bufferName);
  llvm::ArrayType *ArrayTy = ArrayType::get(StructTy, -1);
  return UndefValue::get(ArrayTy->getPointerTo());
}

unsigned int DxilPatchShaderRecordBindings::AddCBufferAliasedHandle(
    Module &M, unsigned int baseRegisterIndex, unsigned int registerSpace,
    const std::string &bufferName) {
  const unsigned int maxConstantBufferSize = 4096 * 16;
  return AddHandle(M, baseRegisterIndex, UINT_MAX, registerSpace,
                   DXIL::ResourceClass::CBuffer, DXIL::ResourceKind::CBuffer,
                   bufferName, GetArraySymbol(M, bufferName)->getType(),
                   maxConstantBufferSize);
}

unsigned int DxilPatchShaderRecordBindings::AddAliasedHandle(
    Module &M, unsigned int baseRegisterIndex, unsigned int registerSpace,
    DXIL::ResourceClass resClass, DXIL::ResourceKind resKind,
    const std::string &bufferName, llvm::Type *type) {
  return AddHandle(M, baseRegisterIndex, UINT_MAX, registerSpace, resClass,
                   resKind, bufferName, type);
}

// TODO: Stolen from Brandon's code
DXIL::ShaderKind GetRayShaderKindCopy(Function *F) {
  if (F->hasFnAttribute("exp-shader"))
    return DXIL::ShaderKind::RayGeneration;
  DxilModule &DM = F->getParent()->GetDxilModule();
  if (DM.HasDxilFunctionProps(F) && DM.GetDxilFunctionProps(F).IsRay())
    return DM.GetDxilFunctionProps(F).shaderKind;
  return DXIL::ShaderKind::Invalid;
}

bool DxilPatchShaderRecordBindings::runOnModule(Module &M) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  EntryPointFunction =
      pInputShaderInfo->ExportName
          ? getFunctionFromName(M, pInputShaderInfo->ExportName)
          : DM.GetEntryFunction();
  ShaderKind = GetRayShaderKindCopy(EntryPointFunction);

  ValidateParameters();
  InitializeViewTable();
  PatchShaderBindings(M);
  DM.ReEmitDxilResources();
  return true;
}

void DxilPatchShaderRecordBindings::ValidateParameters() {
  if (!pInputShaderInfo || !pInputShaderInfo->pRootSignatureDesc) {
    throw std::exception();
  }
}

DxilResourceBase &GetResourceFromID(DxilModule &DM,
                                    DXIL::ResourceClass resClass,
                                    unsigned int id) {
  switch (resClass) {
  case DXIL::ResourceClass::CBuffer:
    return DM.GetCBuffer(id);
    break;
  case DXIL::ResourceClass::SRV:
    return DM.GetSRV(id);
    break;
  case DXIL::ResourceClass::UAV:
    return DM.GetUAV(id);
    break;
  case DXIL::ResourceClass::Sampler:
    return DM.GetSampler(id);
    break;
  default:
    ThrowFailure();
    llvm_unreachable("invalid resource class");
  }
}

unsigned int FindOrInsertViewIntoList(const ViewKey &key, ViewKey *pViewList,
                                      unsigned int &numViews,
                                      unsigned int maxViews) {
  unsigned int viewIndex = 0;
  for (; viewIndex < numViews; viewIndex++) {
    if (pViewList[viewIndex] == key) {
      break;
    }
  }

  if (viewIndex == numViews) {
    if (viewIndex >= maxViews) {
      ThrowFailure();
    }
    pViewList[viewIndex] = key;
    numViews++;
  }
  return viewIndex;
}

llvm::Value *DxilPatchShaderRecordBindings::GetAliasedDescriptorHeapHandle(
    Module &M, llvm::Type *type, DXIL::ResourceClass resClass,
    DXIL::ResourceKind resKind) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  unsigned int resClassIndex = (unsigned int)resClass;

  ViewKey key = {};
  key.ViewType = (unsigned int)resKind;
  if (DXIL::IsStructuredBuffer(resKind)) {
    key.StructuredStride = type->getPrimitiveSizeInBits();
  } else if (resKind != DXIL::ResourceKind::RawBuffer) {
    auto containedType = type->getContainedType(0);

    // If it's a vector, get the type of just a single element
    if (containedType->getNumContainedTypes() > 0) {
      assert(containedType->getNumContainedTypes() <= 4);
      containedType = containedType->getContainedType(0);
    }
    key.SRVComponentType =
        (unsigned int)CompType::GetCompType(containedType).GetKind();
  }

  auto aliasedDescriptorHeapHandle =
      TypeToAliasedDescriptorHeap[resClassIndex].find(key);
  if (aliasedDescriptorHeapHandle ==
      TypeToAliasedDescriptorHeap[resClassIndex].end()) {
    unsigned int registerSpaceOffset = 0;
    std::string HandleName;
    if (resClass == DXIL::ResourceClass::SRV) {
      registerSpaceOffset = FindOrInsertViewIntoList(
          key, pInputShaderInfo->pSRVRegisterSpaceArray,
          *pInputShaderInfo->pNumSRVSpaces,
          FallbackLayerNumDescriptorHeapSpacesPerView);
      HandleName = std::string("SRVDescriptorHeapTable") +
                   std::to_string(registerSpaceOffset);
    } else if (resClass == DXIL::ResourceClass::UAV) {
      registerSpaceOffset = FindOrInsertViewIntoList(
          key, pInputShaderInfo->pUAVRegisterSpaceArray,
          *pInputShaderInfo->pNumUAVSpaces,
          FallbackLayerNumDescriptorHeapSpacesPerView);

      if (registerSpaceOffset == 0) {
        // Using the descriptor heap declared by the fallback for handling
        // emulated pointers, make sure the name is an exact match
        assert(key.ViewType ==
               (unsigned int)hlsl::DXIL::ResourceKind::RawBuffer);
        HandleName =
            "\01?DescriptorHeapBufferTable@@3PAURWByteAddressBuffer@@A";
      } else {
        HandleName = std::string("UAVDescriptorHeapTable") +
                     std::to_string(registerSpaceOffset);
      }
    } else if (resClass == DXIL::ResourceClass::CBuffer) {
      HandleName = std::string("CBVDescriptorHeapTable");
    } else {
      HandleName = std::string("SamplerDescriptorHeapTable");
    }

    llvm::ArrayType *descriptorHeapType = ArrayType::get(type, 0);
    unsigned int id = AddAliasedHandle(
        M, FallbackLayerDescriptorHeapTable,
        FallbackLayerRegisterSpace + FallbackLayerDescriptorHeapSpaceOffset +
            registerSpaceOffset,
        resClass, resKind, HandleName, descriptorHeapType);

    TypeToAliasedDescriptorHeap[resClassIndex][key] =
        GetResourceFromID(DM, resClass, id).GetGlobalSymbol();
  }
  return TypeToAliasedDescriptorHeap[resClassIndex][key];
}

void DxilPatchShaderRecordBindings::AddInputBinding(Module &M) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  auto &EntryBlock = EntryPointFunction->getEntryBlock();
  auto &Instructions = EntryBlock.getInstList();

  std::string bufferName;
  unsigned int bufferRegister;
  switch (ShaderKind) {
  case DXIL::ShaderKind::AnyHit:
  case DXIL::ShaderKind::ClosestHit:
  case DXIL::ShaderKind::Intersection:
    bufferRegister = FallbackLayerHitGroupRecordByteAddressBufferRegister;
    bufferName = "\01?HitGroupShaderTable@@3UByteAddressBuffer@@A";
    break;
  case DXIL::ShaderKind::Miss:
    bufferRegister = FallbackLayerMissShaderRecordByteAddressBufferRegister;
    bufferName = "\01?MissShaderTable@@3UByteAddressBuffer@@A";
    break;
  case DXIL::ShaderKind::RayGeneration:
    bufferRegister = FallbackLayerRayGenShaderRecordByteAddressBufferRegister;
    bufferName = "\01?RayGenShaderTable@@3UByteAddressBuffer@@A";
    break;
  case DXIL::ShaderKind::Callable:
    bufferRegister = FallbackLayerCallableShaderRecordByteAddressBufferRegister;
    bufferName = "\01?CallableShaderTable@@3UByteAddressBuffer@@A";
    break;
  }

  unsigned int ShaderRecordID =
      AddSRVRawBuffer(M, bufferRegister, FallbackLayerRegisterSpace,
                      bufferName);

  auto It = Instructions.begin();
  OP *HlslOP = DM.GetOP();
  LLVMContext &Ctx = M.getContext();
  IRBuilder<> Builder(It);
  {
    auto ShaderTableName = "ShaderTableHandle";
    llvm::Value *Symbol = DM.GetSRV(ShaderRecordID).GetGlobalSymbol();
    llvm::Value *Load = Builder.CreateLoad(Symbol, "LoadShaderTableHandle");
    Function *CreateHandleForLib =
        HlslOP->GetOpFunc(DXIL::OpCode::CreateHandleForLib, Load->getType());
    Constant *CreateHandleOpcodeArg =
        HlslOP->GetU32Const((unsigned)DXIL::OpCode::CreateHandleForLib);
    ShaderTableHandle = Builder.CreateCall(
        CreateHandleForLib, {CreateHandleOpcodeArg, Load}, ShaderTableName);
  }

  {
    auto CbufferName = "Constants";
    const unsigned int sizeOfConstantsInBytes = sizeof(DispatchRaysConstants);
    llvm::StructType *StructTy = M.getTypeByName(CbufferName);
    if (!StructTy) {
      const unsigned int numUintsInConstants =
          sizeOfConstantsInBytes / sizeof(unsigned int);
      SmallVector<llvm::Type *, numUintsInConstants> Elements(
          numUintsInConstants);
      for (unsigned int i = 0; i < numUintsInConstants; i++) {
        Elements[i] = Type::getInt32Ty(Ctx);
      }
      StructTy = llvm::StructType::create(Elements, CbufferName);
      AddAnnoationsIfNeeded(DM, StructTy, std::string(CbufferName),
                            numUintsInConstants);
    }

    unsigned int handle =
        AddHandle(M, FallbackLayerDispatchConstantsRegister, 1,
                  FallbackLayerRegisterSpace, DXIL::ResourceClass::CBuffer,
                  DXIL::ResourceKind::CBuffer, CbufferName, StructTy,
                  sizeOfConstantsInBytes);

    llvm::Value *Symbol = DM.GetCBuffer(handle).GetGlobalSymbol();
    llvm::Value *Load = Builder.CreateLoad(Symbol, "DispatchRaysConstants");
    Function *CreateHandleForLib =
        HlslOP->GetOpFunc(DXIL::OpCode::CreateHandleForLib, Load->getType());
    Constant *CreateHandleOpcodeArg =
        HlslOP->GetU32Const((unsigned)DXIL::OpCode::CreateHandleForLib);
    DispatchRaysConstantsHandle = Builder.CreateCall(
        CreateHandleForLib, {CreateHandleOpcodeArg, Load}, CbufferName);
  }

  // Raygen always reads from the start so no offset calculations needed
  if (ShaderKind != DXIL::ShaderKind::RayGeneration) {
    std::string ShaderRecordOffsetFuncName =
        "\x1?Fallback_ShaderRecordOffset@@YAIXZ";
    Function *ShaderRecordOffsetFunc =
        M.getFunction(ShaderRecordOffsetFuncName);
    if (!ShaderRecordOffsetFunc) {
      FunctionType *ShaderRecordOffsetFuncType =
          FunctionType::get(llvm::Type::getInt32Ty(Ctx), {}, false);
      ShaderRecordOffsetFunc = Function::Create(
          ShaderRecordOffsetFuncType,
          GlobalValue::LinkageTypes::ExternalLinkage,
          ShaderRecordOffsetFuncName, &M);
    }
    BaseShaderRecordOffset =
        Builder.CreateCall(ShaderRecordOffsetFunc, {}, "shaderRecordOffset");
  } else {
    BaseShaderRecordOffset = HlslOP->GetU32Const(0);
  }
}

llvm::Value *DxilPatchShaderRecordBindings::CreateOffsetToShaderRecord(
    Module &M, IRBuilder<> &Builder, unsigned int RecordOffsetInBytes,
    llvm::Value *CbufferOffsetInBytes) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();

  // Create handle for the newly-added constant buffer (which is achieved via a
  // function call)
  auto AdddName = "ShaderRecordOffsetInBytes";
  Constant *ShaderRecordOffsetInBytes = HlslOP->GetU32Const(
      RecordOffsetInBytes); // Offset of constants in shader record buffer
  return Builder.CreateAdd(CbufferOffsetInBytes, ShaderRecordOffsetInBytes,
                           AdddName);
}

llvm::Value *DxilPatchShaderRecordBindings::CreateCBufferLoadLegacy(
    Module &M, IRBuilder<> &Builder, llvm::Value *ResourceHandle,
    unsigned int RowToLoad) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();
  LLVMContext &Ctx = M.getContext();

  auto BufferLoadName = "ConstantBuffer";
  Function *BufferLoad =
      HlslOP->GetOpFunc(DXIL::OpCode::CBufferLoadLegacy, Type::getInt32Ty(Ctx));
  Constant *CBufferLoadOpcodeArg =
      HlslOP->GetU32Const((unsigned)DXIL::OpCode::CBufferLoadLegacy);
  Constant *RowToLoadConst = HlslOP->GetU32Const(RowToLoad);
  return Builder.CreateCall(
      BufferLoad, {CBufferLoadOpcodeArg, ResourceHandle, RowToLoadConst},
      BufferLoadName);
}

llvm::Value *DxilPatchShaderRecordBindings::CreateShaderRecordBufferLoad(
    Module &M, IRBuilder<> &Builder, llvm::Value *ShaderRecordOffsetInBytes,
    llvm::Type *type) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();
  LLVMContext &Ctx = M.getContext();

  // Create handle for the newly-added constant buffer (which is achieved via a
  // function call)
  auto BufferLoadName = "ShaderRecordBuffer";
  if (type->getNumContainedTypes() > 1) {
    // TODO: Buffer loads aren't legal with container types, check if this is
    // the right way to handle this
    type = type->getContainedType(0);
  }
  // TODO: Do I need to check the result? Hopefully not
  Function *BufferLoad = HlslOP->GetOpFunc(DXIL::OpCode::BufferLoad, type);
  Constant *BufferLoadOpcodeArg =
      HlslOP->GetU32Const((unsigned)DXIL::OpCode::BufferLoad);
  Constant *Unused = UndefValue::get(llvm::Type::getInt32Ty(Ctx));
  return Builder.CreateCall(BufferLoad,
                            {BufferLoadOpcodeArg, ShaderTableHandle,
                             ShaderRecordOffsetInBytes, Unused},
                            BufferLoadName);
}

void DxilPatchShaderRecordBindings::ReplaceUsesOfWith(
    llvm::Instruction *InstructionToReplace, llvm::Value *ValueToReplaceWith) {
  for (auto UserIter = InstructionToReplace->user_begin();
       UserIter != InstructionToReplace->user_end();) {
    // Increment the iterator before the replace since the replace alters the
    // uses list
    auto userInstr = UserIter++;
    userInstr->replaceUsesOfWith(InstructionToReplace, ValueToReplaceWith);
  }
  InstructionToReplace->eraseFromParent();
}

llvm::Value *DxilPatchShaderRecordBindings::CreateCBufferLoadOffsetInBytes(
    Module &M, IRBuilder<> &Builder, llvm::Instruction *instruction) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();
  DxilInst_CBufferLoad cbufferLoad(instruction);
  DxilInst_CBufferLoadLegacy cbufferLoadLegacy(instruction);
  if (cbufferLoad) {
    return cbufferLoad.get_byteOffset();
  } else if (cbufferLoadLegacy) {
    Constant *LegacyMultiplier = HlslOP->GetU32Const(16);
    return Builder.CreateMul(cbufferLoadLegacy.get_regIndex(),
                             LegacyMultiplier);
  } else {
    ThrowFailure();
    return nullptr;
  }
}

bool DxilPatchShaderRecordBindings::IsCBufferLoad(
    llvm::Instruction *instruction) {
  DxilInst_CBufferLoad cbufferLoad(instruction);
  DxilInst_CBufferLoadLegacy cbufferLoadLegacy(instruction);
  return cbufferLoad || cbufferLoadLegacy;
}

unsigned int GetResolvedRangeID(DXIL::ResourceClass resClass,
                                Value *rangeIdVal) {
  if (auto CI = dyn_cast<ConstantInt>(rangeIdVal)) {
    return CI->getZExtValue();
  } else {
    assert(false);
    return 0;
  }
}

// TODO: This code is quite inefficient
bool DxilPatchShaderRecordBindings::GetHandleInfo(
    Module &M, DxilInst_CreateHandleForLib &createHandleStructForLib,
    unsigned int &shaderRegister, unsigned int &registerSpace,
    DXIL::ResourceKind &kind, DXIL::ResourceClass &resClass,
    llvm::Type *&resType) {
  DxilModule &DM = M.GetOrCreateDxilModule();

  LoadInst *loadRangeId =
      cast<LoadInst>(createHandleStructForLib.get_Resource());
  Value *ResourceSymbol = loadRangeId->getPointerOperand();

  DXIL::ResourceClass resourceClasses[] = {
      DXIL::ResourceClass::CBuffer, DXIL::ResourceClass::SRV,
      DXIL::ResourceClass::UAV, DXIL::ResourceClass::Sampler};

  hlsl::DxilResourceBase *Resource = nullptr;
  for (auto &resourceClass : resourceClasses) {
    switch (resourceClass) {
    case DXIL::ResourceClass::CBuffer: {
      auto &cbuffers = DM.GetCBuffers();
      for (auto &cbuffer : cbuffers) {
        if (cbuffer->GetGlobalSymbol() == ResourceSymbol) {
          Resource = cbuffer.get();
          break;
        }
      }
      break;
    }
    case DXIL::ResourceClass::SRV:
    case DXIL::ResourceClass::UAV: {
      auto &viewList = resourceClass == DXIL::ResourceClass::SRV
                           ? DM.GetSRVs()
                           : DM.GetUAVs();
      for (auto &view : viewList) {
        if (view->GetGlobalSymbol() == ResourceSymbol) {
          Resource = view.get();
          break;
        }
      }
      break;
    }
    case DXIL::ResourceClass::Sampler: {
      auto &samplers = DM.GetSamplers();
      for (auto &sampler : samplers) {
        if (sampler->GetGlobalSymbol() == ResourceSymbol) {
          Resource = sampler.get();
          break;
        }
      }
      break;
    }
    }
  }

  if (Resource) {
    registerSpace = Resource->GetSpaceID();
    shaderRegister = Resource->GetLowerBound();
    kind = Resource->GetKind();
    resClass = Resource->GetClass();
    resType = Resource->GetHLSLType()->getPointerElementType();
  }
  return Resource != nullptr;
}

llvm::Value *DxilPatchShaderRecordBindings::LoadShaderRecordData(
    Module &M, IRBuilder<> &Builder, llvm::Value *offsetToShaderRecord,
    unsigned int dataOffsetInShaderRecord) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  LLVMContext &Ctx = M.getContext();
  OP *HlslOP = DM.GetOP();

  Constant *dataOffset = HlslOP->GetU32Const(dataOffsetInShaderRecord);
  Value *shaderTableOffsetToData =
      Builder.CreateAdd(dataOffset, offsetToShaderRecord);
  return CreateShaderRecordBufferLoad(M, Builder, shaderTableOffsetToData,
                                      llvm::Type::getInt32Ty(Ctx));
}

void DxilPatchShaderRecordBindings::PatchCreateHandleToUseDescriptorIndex(
    Module &M, IRBuilder<> &Builder, DXIL::ResourceKind &resourceKind,
    DXIL::ResourceClass &resourceClass, llvm::Type *resourceType,
    llvm::Value *descriptorIndex,
    DxilInst_CreateHandleForLib &createHandleInstr) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();

  llvm::Value *descriptorHeapSymbol = GetAliasedDescriptorHeapHandle(
      M, resourceType, resourceClass, resourceKind);
  llvm::Value *viewSymbol = Builder.CreateGEP(
      descriptorHeapSymbol, {HlslOP->GetU32Const(0), descriptorIndex},
      "IndexIntoDH");
  DxilMDHelper::MarkNonUniform(cast<Instruction>(viewSymbol));
  llvm::Value *handle = Builder.CreateLoad(viewSymbol);

  auto callInst = cast<CallInst>(createHandleInstr.Instr);
  callInst->setCalledFunction(
      HlslOP->GetOpFunc(DXIL::OpCode::CreateHandleForLib, handle->getType()));
  createHandleInstr.set_Resource(handle);
}

void DxilPatchShaderRecordBindings::InitializeViewTable() {
  // The Fallback Layer declares a bindless raw buffer that spans the entire
  // descriptor heap, manually add it to the list of UAV register spaces used
  if (*pInputShaderInfo->pNumUAVSpaces == 0) {
    ViewKey key = {(unsigned int)hlsl::DXIL::ResourceKind::RawBuffer, {0}};
    unsigned int index = FindOrInsertViewIntoList(
        key, pInputShaderInfo->pUAVRegisterSpaceArray,
        *pInputShaderInfo->pNumUAVSpaces,
        FallbackLayerNumDescriptorHeapSpacesPerView);
    (void)index;
    assert(index == 0);
  }
}

void DxilPatchShaderRecordBindings::PatchShaderBindings(Module &M) {
  DxilModule &DM = M.GetOrCreateDxilModule();
  OP *HlslOP = DM.GetOP();

  // Don't erase instructions until the very end because it throws off the
  // iterator
  std::vector<llvm::Instruction *> instructionsToRemove;

  for (BasicBlock &block : EntryPointFunction->getBasicBlockList()) {
    auto &Instructions = block.getInstList();
    for (auto &instr : Instructions) {
      DxilInst_CreateHandleForLib createHandleForLib(&instr);
      if (createHandleForLib) {
        DXIL::ResourceClass resourceClass;
        unsigned int registerSpace;
        unsigned int registerIndex;
        DXIL::ResourceKind kind;
        llvm::Type *resType;

        bool resourceIsResolved = true;
        resourceIsResolved =
            GetHandleInfo(M, createHandleForLib, registerIndex, registerSpace,
                          kind, resourceClass, resType);
        if (!resourceIsResolved)
          continue; // TODO: This shouldn't actually be happening?

        ShaderRecordEntry shaderRecord = FindRootSignatureDescriptor(
            *pRootSignatureDesc,
            pInputShaderInfo->ShaderRecordIdentifierSizeInBytes, resourceClass,
            registerIndex, registerSpace);

        const bool IsBindingSpecifiedInLocalRootSignature =
            !shaderRecord.IsInvalid();
        if (IsBindingSpecifiedInLocalRootSignature) {
          if (!DispatchRaysConstantsHandle) {
            AddInputBinding(M);
          }

          switch (shaderRecord.ParameterType) {
          case DxilRootParameterType::Constants32Bit: {
            for (User *U : instr.users()) {
              llvm::Instruction *instruction = cast<CallInst>(U);
              if (IsCBufferLoad(instruction)) {
                llvm::Instruction *cbufferLoadInstr = instruction;
                IRBuilder<> Builder(cbufferLoadInstr);
                llvm::Value *cbufferOffsetInBytes =
                    CreateCBufferLoadOffsetInBytes(M, Builder,
                                                   cbufferLoadInstr);
                llvm::Value *LocalOffsetToRootConstant =
                    CreateOffsetToShaderRecord(
                        M, Builder, shaderRecord.RecordOffsetInBytes,
                        cbufferOffsetInBytes);
                llvm::Value *GlobalOffsetToRootConstant = Builder.CreateAdd(
                    LocalOffsetToRootConstant, BaseShaderRecordOffset);
                llvm::Value *srvBufferLoad = CreateShaderRecordBufferLoad(
                    M, Builder, GlobalOffsetToRootConstant,
                    cbufferLoadInstr->getType());
                ReplaceUsesOfWith(cbufferLoadInstr, srvBufferLoad);
              } else {
                ThrowFailure();
              }
            }
            instructionsToRemove.push_back(&instr);
            break;
          }
          case DxilRootParameterType::DescriptorTable: {
            IRBuilder<> Builder(&instr);
            llvm::Value *srvBufferLoad =
                LoadShaderRecordData(M, Builder, BaseShaderRecordOffset,
                                     shaderRecord.RecordOffsetInBytes);
            llvm::Value *DescriptorTableEntryLo = Builder.CreateExtractValue(
                srvBufferLoad, 0, "DescriptorTableHandleLo");

            unsigned int offsetToLoadInUints =
                offsetof(DispatchRaysConstants, SrvCbvUavDescriptorHeapStart) /
                sizeof(uint32_t);
            unsigned int uintsPerRow = 4;
            unsigned int rowToLoad = offsetToLoadInUints / uintsPerRow;
            unsigned int extractValueOffset = offsetToLoadInUints % uintsPerRow;
            llvm::Value *DescHeapConstants = CreateCBufferLoadLegacy(
                M, Builder, DispatchRaysConstantsHandle, rowToLoad);
            llvm::Value *DescriptorHeapStartAddressLo =
                Builder.CreateExtractValue(DescHeapConstants,
                                           extractValueOffset,
                                           "DescriptorHeapStartHandleLo");
            // TODO: The hi bits can only be ignored if the difference is
            // guaranteed to be < 32 bytes. This is an unsafe assumption,
            // particularly given large descriptor sizes
            llvm::Value *DescriptorTableOffsetInBytes = Builder.CreateSub(
                DescriptorTableEntryLo, DescriptorHeapStartAddressLo,
                "TableOffsetInBytes");
            Constant *DescriptorSizeInBytes = HlslOP->GetU32Const(
                pInputShaderInfo->SrvCbvUavDescriptorSizeInBytes);
            llvm::Value *DescriptorTableStartIndex = Builder.CreateExactUDiv(
                DescriptorTableOffsetInBytes, DescriptorSizeInBytes,
                "TableStartIndex");

            Constant *RecordOffset =
                HlslOP->GetU32Const(shaderRecord.OffsetInDescriptors);
            llvm::Value *BaseDescriptorIndex = Builder.CreateAdd(
                DescriptorTableStartIndex, RecordOffset, "BaseDescriptorIndex");

            // TODO: Not supporting dynamic indexing yet, should be pulled from
            // CreateHandleForLib. If dynamic indexing is being used, add the
            // app's index on top of the calculated index
            llvm::Value *DynamicIndex = HlslOP->GetU32Const(0);
            llvm::Value *DescriptorIndex = Builder.CreateAdd(
                BaseDescriptorIndex, DynamicIndex, "DescriptorIndex");

            PatchCreateHandleToUseDescriptorIndex(M, Builder, kind,
                                                  resourceClass, resType,
                                                  DescriptorIndex,
                                                  createHandleForLib);
            break;
          }
          case DxilRootParameterType::CBV:
          case DxilRootParameterType::SRV:
          case DxilRootParameterType::UAV: {
            IRBuilder<> Builder(&instr);
            llvm::Value *srvBufferLoad =
                LoadShaderRecordData(M, Builder, BaseShaderRecordOffset,
                                     shaderRecord.RecordOffsetInBytes);
            llvm::Value *DescriptorIndex = Builder.CreateExtractValue(
                srvBufferLoad, 1, "DescriptorHeapIndex");
            // TODO: Handle offset in bytes
            // llvm::Value *OffsetInBytes = Builder.CreateExtractValue(
            //     srvBufferLoad, 0, "OffsetInBytes");
            PatchCreateHandleToUseDescriptorIndex(M, Builder, kind,
                                                  resourceClass, resType,
                                                  DescriptorIndex,
                                                  createHandleForLib);
            break;
          }
          default:
            ThrowFailure();
            break;
          }
        }
      }
    }
  }

  for (auto instruction : instructionsToRemove) {
    instruction->eraseFromParent();
  }
}

bool IsParameterTypeCompatibleWithResourceClass(
    DXIL::ResourceClass resourceClass, DxilRootParameterType parameterType) {
  switch (parameterType) {
  case DxilRootParameterType::DescriptorTable:
    return true;
  case DxilRootParameterType::Constants32Bit:
  case DxilRootParameterType::CBV:
    return resourceClass == DXIL::ResourceClass::CBuffer;
  case DxilRootParameterType::SRV:
    return resourceClass == DXIL::ResourceClass::SRV;
  case DxilRootParameterType::UAV:
    return resourceClass == DXIL::ResourceClass::UAV;
  default:
    ThrowFailure();
    return false;
  }
}

DxilRootParameterType
ConvertD3D12ParameterTypeToDxil(DxilRootParameterType parameter) {
  switch (parameter) {
  case DxilRootParameterType::Constants32Bit:
    return DxilRootParameterType::Constants32Bit;
  case DxilRootParameterType::DescriptorTable:
    return DxilRootParameterType::DescriptorTable;
  case DxilRootParameterType::CBV:
    return DxilRootParameterType::CBV;
  case DxilRootParameterType::SRV:
    return DxilRootParameterType::SRV;
  case DxilRootParameterType::UAV:
    return DxilRootParameterType::UAV;
  }
  assert(false);
  return (DxilRootParameterType)-1;
}

DXIL::ResourceClass
ConvertD3D12RangeTypeToDxil(DxilDescriptorRangeType rangeType) {
  switch (rangeType) {
  case DxilDescriptorRangeType::SRV:
    return DXIL::ResourceClass::SRV;
  case DxilDescriptorRangeType::UAV:
    return DXIL::ResourceClass::UAV;
  case DxilDescriptorRangeType::CBV:
    return DXIL::ResourceClass::CBuffer;
  case DxilDescriptorRangeType::Sampler:
    return DXIL::ResourceClass::Sampler;
  }
  assert(false);
  return (DXIL::ResourceClass)-1;
}

unsigned int GetParameterTypeAlignment(DxilRootParameterType parameterType) {
  switch (parameterType) {
  case DxilRootParameterType::DescriptorTable:
    return SizeofD3D12GpuDescriptorHandle;
  case DxilRootParameterType::Constants32Bit:
    return sizeof(uint32_t);
  case DxilRootParameterType::CBV: // fallthrough
  case DxilRootParameterType::SRV: // fallthrough
  case DxilRootParameterType::UAV:
    return SizeofD3D12GpuVA;
  default:
    return UINT_MAX;
  }
}

template <typename TD3D12_ROOT_SIGNATURE_DESC>
ShaderRecordEntry FindRootSignatureDescriptorHelper(
    const TD3D12_ROOT_SIGNATURE_DESC &rootSignatureDescriptor,
    unsigned int ShaderRecordIdentifierSizeInBytes,
    DXIL::ResourceClass resourceClass, unsigned int baseRegisterIndex,
    unsigned int registerSpace) {
  // Automatically fail if it's looking for a fallback binding as these never
  // need to be patched
  if (registerSpace != FallbackLayerRegisterSpace) {
    unsigned int recordOffset = ShaderRecordIdentifierSizeInBytes;
    for (unsigned int rootParamIndex = 0;
         rootParamIndex < rootSignatureDescriptor.NumParameters;
         rootParamIndex++) {
      auto &rootParam = rootSignatureDescriptor.pParameters[rootParamIndex];
      auto dxilParamType =
          ConvertD3D12ParameterTypeToDxil(rootParam.ParameterType);
#define ALIGN(alignment, num) (((num + alignment - 1) / alignment) * alignment)
      recordOffset =
          ALIGN(GetParameterTypeAlignment(rootParam.ParameterType),
                recordOffset);
      switch (rootParam.ParameterType) {
      case DxilRootParameterType::Constants32Bit:
        if (IsParameterTypeCompatibleWithResourceClass(resourceClass,
                                                       dxilParamType) &&
            baseRegisterIndex == rootParam.Constants.ShaderRegister &&
            registerSpace == rootParam.Constants.RegisterSpace) {
          return {dxilParamType, recordOffset, 0};
        }
        recordOffset += rootParam.Constants.Num32BitValues * sizeof(uint32_t);
        break;
      case DxilRootParameterType::DescriptorTable: {
        auto &descriptorTable = rootParam.DescriptorTable;
        unsigned int rangeOffsetInDescriptors = 0;
        for (unsigned int rangeIndex = 0;
             rangeIndex < descriptorTable.NumDescriptorRanges; rangeIndex++) {
          auto &range = descriptorTable.pDescriptorRanges[rangeIndex];
          if (range.OffsetInDescriptorsFromTableStart != (unsigned)-1) {
            rangeOffsetInDescriptors = range.OffsetInDescriptorsFromTableStart;
          }
          if (ConvertD3D12RangeTypeToDxil(range.RangeType) == resourceClass &&
              range.RegisterSpace == registerSpace &&
              range.BaseShaderRegister <= baseRegisterIndex &&
              range.BaseShaderRegister + range.NumDescriptors >
                  baseRegisterIndex) {
            rangeOffsetInDescriptors +=
                baseRegisterIndex - range.BaseShaderRegister;
            return {dxilParamType, recordOffset, rangeOffsetInDescriptors};
          }
          rangeOffsetInDescriptors += range.NumDescriptors;
        }
        recordOffset += SizeofD3D12GpuDescriptorHandle;
        break;
      }
      case DxilRootParameterType::CBV:
      case DxilRootParameterType::SRV:
      case DxilRootParameterType::UAV:
        if (IsParameterTypeCompatibleWithResourceClass(resourceClass,
                                                       dxilParamType) &&
            baseRegisterIndex == rootParam.Descriptor.ShaderRegister &&
            registerSpace == rootParam.Descriptor.RegisterSpace) {
          return {dxilParamType, recordOffset, 0};
        }
        recordOffset += SizeofD3D12GpuVA;
        break;
      }
    }
  }
  return ShaderRecordEntry::InvalidEntry();
}

// TODO: Consider pre-calculating this into a map
ShaderRecordEntry DxilPatchShaderRecordBindings::FindRootSignatureDescriptor(
    const DxilVersionedRootSignatureDesc &rootSignatureDescriptor,
    unsigned int ShaderRecordIdentifierSizeInBytes,
    DXIL::ResourceClass resourceClass, unsigned int baseRegisterIndex,
    unsigned int registerSpace) {
  switch (rootSignatureDescriptor.Version) {
  case DxilRootSignatureVersion::Version_1_0:
    return FindRootSignatureDescriptorHelper(
        rootSignatureDescriptor.Desc_1_0, ShaderRecordIdentifierSizeInBytes,
        resourceClass, baseRegisterIndex, registerSpace);
  case DxilRootSignatureVersion::Version_1_1:
    return FindRootSignatureDescriptorHelper(
        rootSignatureDescriptor.Desc_1_1, ShaderRecordIdentifierSizeInBytes,
        resourceClass, baseRegisterIndex, registerSpace);
  default:
    ThrowFailure();
    return ShaderRecordEntry::InvalidEntry();
  }
}
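The record-offset arithmetic in FindRootSignatureDescriptorHelper above can be exercised in isolation. The following standalone sketch mirrors that walk under simplified, hypothetical types (Param and ParamKind are illustrative stand-ins, not DXC types): each root parameter is first aligned to its natural alignment (8 bytes for descriptor handles and GPU virtual addresses, 4 bytes for root constants), then the cursor advances past its size.

// Standalone sketch of the shader-record offset walk; Param/ParamKind are
// hypothetical stand-ins for the D3D12 root parameter types used above.
#include <cstdint>
#include <cstdio>
#include <vector>

enum class ParamKind { Constants32Bit, DescriptorTable, RootDescriptor };

struct Param {
  ParamKind Kind;
  uint32_t Num32BitValues; // only used for Constants32Bit
};

static uint32_t Align(uint32_t alignment, uint32_t num) {
  return ((num + alignment - 1) / alignment) * alignment; // same as ALIGN()
}

static uint32_t SizeOf(const Param &p) {
  switch (p.Kind) {
  case ParamKind::Constants32Bit:  return p.Num32BitValues * sizeof(uint32_t);
  case ParamKind::DescriptorTable: return 8; // D3D12 GPU descriptor handle
  case ParamKind::RootDescriptor:  return 8; // D3D12 GPU virtual address
  }
  return 0;
}

static uint32_t AlignmentOf(const Param &p) {
  return p.Kind == ParamKind::Constants32Bit ? sizeof(uint32_t) : 8;
}

int main() {
  const uint32_t shaderIdentifierSize = 32; // record data starts after the identifier
  std::vector<Param> params = {
      {ParamKind::Constants32Bit, 3}, // 12 bytes at offset 32
      {ParamKind::DescriptorTable, 0}, // aligned up to offset 48
      {ParamKind::RootDescriptor, 0},  // offset 56
  };
  uint32_t offset = shaderIdentifierSize;
  for (const Param &p : params) {
    offset = Align(AlignmentOf(p), offset);
    std::printf("param at record offset %u\n", offset);
    offset += SizeOf(p);
  }
}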
repos/DirectXShaderCompiler/lib/HLSL/DxilPoisonValues.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// DxilPoisonValues.cpp                                                      //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source    //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Allows insertion of poisoned values with error messages that get         //
// cleaned up late in the compiler.                                          //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "dxc/HLSL/DxilPoisonValues.h"

#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

constexpr const char kPoisonPrefix[] = "dx.poison.";

namespace hlsl {
Value *CreatePoisonValue(Type *ty, const Twine &errMsg, DebugLoc DL,
                         Instruction *InsertPt) {
  std::string functionName;
  {
    llvm::raw_string_ostream os(functionName);
    os << kPoisonPrefix;
    os << *ty;
    os.flush();
  }

  Module &M = *InsertPt->getModule();
  LLVMContext &C = M.getContext();

  Type *argTypes[] = {Type::getMetadataTy(C)};
  FunctionType *ft = FunctionType::get(ty, argTypes, false);
  Constant *f = M.getOrInsertFunction(functionName, ft);

  std::string errMsgStr = errMsg.str();
  Value *args[] = {MetadataAsValue::get(C, MDString::get(C, errMsgStr))};
  CallInst *ret = CallInst::Create(f, ArrayRef<Value *>(args), "err", InsertPt);
  ret->setDebugLoc(DL);
  return ret;
}

bool FinalizePoisonValues(Module &M) {
  bool changed = false;
  LLVMContext &Ctx = M.getContext();
  for (auto it = M.begin(); it != M.end();) {
    Function *F = &*(it++);
    if (F->getName().startswith(kPoisonPrefix)) {
      for (auto it = F->user_begin(); it != F->user_end();) {
        User *U = *(it++);
        CallInst *call = cast<CallInst>(U);
        MDString *errMsgMD = cast<MDString>(
            cast<MetadataAsValue>(call->getArgOperand(0))->getMetadata());
        StringRef errMsg = errMsgMD->getString();
        Ctx.diagnose(
            DiagnosticInfoDxil(F, call->getDebugLoc(), errMsg, DS_Error));
        if (!call->getType()->isVoidTy())
          call->replaceAllUsesWith(UndefValue::get(call->getType()));
        call->eraseFromParent();
      }
      F->eraseFromParent();
      changed = true;
    }
  }
  return changed;
}
} // namespace hlsl
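A minimal usage sketch of the two entry points above, assuming the DXC build environment (the helper names LowerOrPoison and Finalize are illustrative, not part of the API):

// Sketch: emit a poison value where lowering fails, and strip/diagnose all
// surviving dx.poison.* calls late in the pipeline.
#include "dxc/HLSL/DxilPoisonValues.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Called where lowering fails on a non-void instruction: uses keep a
// well-typed operand now, and the error is only reported if the poison
// survives to finalization.
static void LowerOrPoison(Instruction *I) {
  Value *replacement = hlsl::CreatePoisonValue(
      I->getType(), "operation is not supported on this target",
      I->getDebugLoc(), I);
  I->replaceAllUsesWith(replacement);
  I->eraseFromParent();
}

// Late in compilation: diagnose every surviving poison call and remove it.
static void Finalize(Module &M) { (void)hlsl::FinalizePoisonValues(M); }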
repos/DirectXShaderCompiler/lib/HLSL/PauseResumePasses.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// PauseResumePasses.cpp                                                     //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source    //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Passes to pause/resume pipeline.                                          //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "dxc/HLSL/DxilGenerationPass.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"

using namespace llvm;
using namespace hlsl;

static const char kPauseResumeMDName[] = "pauseresume";
static const char kPauseResumeNumFields = 2;
static const char kPauseResumePassNameToPause = 0;
static const char kPauseResumePassNameToResume = 1;

namespace hlsl {

bool ClearPauseResumePasses(Module &M) {
  NamedMDNode *N = M.getNamedMetadata(kPauseResumeMDName);
  if (N) {
    M.eraseNamedMetadata(N);
    return true;
  }
  return false;
}

void GetPauseResumePasses(Module &M, StringRef &pause, StringRef &resume) {
  NamedMDNode *N = M.getNamedMetadata(kPauseResumeMDName);
  if (N && N->getNumOperands() > 0) {
    MDNode *MD = N->getOperand(0);
    pause =
        dyn_cast<MDString>(MD->getOperand(kPauseResumePassNameToPause).get())
            ->getString();
    resume =
        dyn_cast<MDString>(MD->getOperand(kPauseResumePassNameToResume).get())
            ->getString();
  }
}

void SetPauseResumePasses(Module &M, StringRef pause, StringRef resume) {
  LLVMContext &Ctx = M.getContext();
  NamedMDNode *N = M.getOrInsertNamedMetadata(kPauseResumeMDName);
  Metadata *MDs[kPauseResumeNumFields];
  MDs[(int)kPauseResumePassNameToPause] = MDString::get(Ctx, pause);
  MDs[(int)kPauseResumePassNameToResume] = MDString::get(Ctx, resume);
  if (N->getNumOperands() == 0)
    N->addOperand(MDNode::get(Ctx, MDs));
  else
    N->setOperand(kPauseResumePassNameToPause, MDNode::get(Ctx, MDs));
}

} // namespace hlsl

namespace {

class NoPausePasses : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit NoPausePasses() : ModulePass(ID) {}
  StringRef getPassName() const override { return "NoPausePasses"; }
  bool runOnModule(Module &M) override { return ClearPauseResumePasses(M); }
};

class PausePasses : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit PausePasses() : ModulePass(ID) {}
  StringRef getPassName() const override { return "PausePasses"; }
  bool runOnModule(Module &M) override {
    StringRef pause, resume;
    hlsl::GetPauseResumePasses(M, pause, resume);
    if (!pause.empty()) {
      const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(pause);
      std::unique_ptr<ModulePass> pass((ModulePass *)PI->createPass());
      pass->setOSOverride(OSOverride);
      return pass->runOnModule(M);
    }
    return false;
  }
};

class ResumePasses : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit ResumePasses() : ModulePass(ID) {}
  StringRef getPassName() const override { return "ResumePasses"; }
  bool runOnModule(Module &M) override {
    StringRef pause, resume;
    hlsl::GetPauseResumePasses(M, pause, resume);
    if (!resume.empty()) {
      const PassInfo *PI =
          PassRegistry::getPassRegistry()->getPassInfo(resume);
      std::unique_ptr<ModulePass> pass((ModulePass *)PI->createPass());
      pass->setOSOverride(OSOverride);
      return pass->runOnModule(M);
    }
    return false;
  }
};

char NoPausePasses::ID = 0;
char PausePasses::ID = 0;
char ResumePasses::ID = 0;

} // namespace

ModulePass *llvm::createNoPausePassesPass() { return new NoPausePasses(); }
ModulePass *llvm::createPausePassesPass() { return new PausePasses(); }
ModulePass *llvm::createResumePassesPass() { return new ResumePasses(); }

INITIALIZE_PASS(NoPausePasses, "hlsl-passes-nopause",
                "Clears metadata used for pause and resume", false, false)
INITIALIZE_PASS(PausePasses, "hlsl-passes-pause", "Prepare to pause passes",
                false, false)
INITIALIZE_PASS(ResumePasses, "hlsl-passes-resume", "Prepare to resume passes",
                false, false)
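A minimal sketch of the metadata round trip these passes rely on; the pass names passed in here are illustrative placeholders, not a statement about which passes must be used:

// Sketch: record, read back, and clear the pauseresume marker.
#include "dxc/HLSL/DxilGenerationPass.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static void PauseResumeDemo(Module &M) {
  // Record where the pipeline should pause and later resume.
  hlsl::SetPauseResumePasses(M, "some-pause-pass", "some-resume-pass");

  // Read it back; PausePasses/ResumePasses perform exactly this lookup
  // before running the named pass via the PassRegistry.
  StringRef pause, resume;
  hlsl::GetPauseResumePasses(M, pause, resume);
  (void)pause;
  (void)resume;

  // Remove the marker once the pipeline has been resumed.
  hlsl::ClearPauseResumePasses(M);
}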
repos/DirectXShaderCompiler/lib/HLSL/HLOperationLowerExtension.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLOperationLowerExtension.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLOperationLowerExtension.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperationLower.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/YAMLParser.h" #include "llvm/Support/raw_os_ostream.h" using namespace llvm; using namespace hlsl; LLVM_ATTRIBUTE_NORETURN static void ThrowExtensionError(StringRef Details) { std::string Msg = (Twine("Error in dxc extension api: ") + Details).str(); throw hlsl::Exception(DXC_E_EXTENSION_ERROR, Msg); } // The lowering strategy format is a string that matches the following regex: // // [a-z](:(?P<ExtraStrategyInfo>.+))?$ // // The first character indicates the strategy with an optional : followed by // additional lowering information specific to that strategy. // ExtensionLowering::Strategy ExtensionLowering::GetStrategy(StringRef strategy) { if (strategy.size() < 1) return Strategy::Unknown; switch (strategy[0]) { case 'n': return Strategy::NoTranslation; case 'r': return Strategy::Replicate; case 'p': return Strategy::Pack; case 'm': return Strategy::Resource; case 'd': return Strategy::Dxil; case 'c': return Strategy::Custom; default: break; } return Strategy::Unknown; } llvm::StringRef ExtensionLowering::GetStrategyName(Strategy strategy) { switch (strategy) { case Strategy::NoTranslation: return "n"; case Strategy::Replicate: return "r"; case Strategy::Pack: return "p"; case Strategy::Resource: return "m"; // m for resource method case Strategy::Dxil: return "d"; case Strategy::Custom: return "c"; default: break; } return "?"; } static std::string ParseExtraStrategyInfo(StringRef strategy) { std::pair<StringRef, StringRef> SplitInfo = strategy.split(":"); return SplitInfo.second; } ExtensionLowering::ExtensionLowering(Strategy strategy, HLSLExtensionsCodegenHelper *helper, OP &hlslOp, HLResourceLookup &hlResourceLookup) : m_strategy(strategy), m_helper(helper), m_hlslOp(hlslOp), m_hlResourceLookup(hlResourceLookup) {} ExtensionLowering::ExtensionLowering(StringRef strategy, HLSLExtensionsCodegenHelper *helper, OP &hlslOp, HLResourceLookup &hlResourceLookup) : ExtensionLowering(GetStrategy(strategy), helper, hlslOp, hlResourceLookup) { m_extraStrategyInfo = ParseExtraStrategyInfo(strategy); } llvm::Value *ExtensionLowering::Translate(llvm::CallInst *CI) { switch (m_strategy) { case Strategy::NoTranslation: return NoTranslation(CI); case Strategy::Replicate: return Replicate(CI); case Strategy::Pack: return Pack(CI); case Strategy::Resource: return Resource(CI); case Strategy::Dxil: return Dxil(CI); case Strategy::Custom: return Custom(CI); default: break; } return Unknown(CI); } llvm::Value *ExtensionLowering::Unknown(CallInst *CI) { assert(false && "unknown translation strategy"); return nullptr; } // Interface to describe how to translate types from HL-dxil to dxil. 
class FunctionTypeTranslator { public: // Arguments can be exploded into multiple copies of the same type. // For example a <2 x i32> could become { i32, 2 } if the vector // is expanded in place or { i32, 1 } if the call is replicated. struct ArgumentType { Type *type; int count; ArgumentType(Type *ty, int cnt = 1) : type(ty), count(cnt) {} }; virtual ~FunctionTypeTranslator() {} virtual Type *TranslateReturnType(CallInst *CI) = 0; virtual ArgumentType TranslateArgumentType(Value *OrigArg) = 0; }; // Class to create the new function with the translated types for low-level // dxil. class FunctionTranslator { public: template <typename TypeTranslator> static Function *GetLoweredFunction(CallInst *CI, ExtensionLowering &lower) { TypeTranslator typeTranslator; return GetLoweredFunction(typeTranslator, CI, lower); } static Function *GetLoweredFunction(FunctionTypeTranslator &typeTranslator, CallInst *CI, ExtensionLowering &lower) { FunctionTranslator translator(typeTranslator, lower); return translator.GetLoweredFunction(CI); } virtual ~FunctionTranslator() {} protected: FunctionTypeTranslator &m_typeTranslator; ExtensionLowering &m_lower; FunctionTranslator(FunctionTypeTranslator &typeTranslator, ExtensionLowering &lower) : m_typeTranslator(typeTranslator), m_lower(lower) {} Function *GetLoweredFunction(CallInst *CI) { // Ge the return type of replicated function. Type *RetTy = m_typeTranslator.TranslateReturnType(CI); if (!RetTy) return nullptr; // Get the Function type for replicated function. FunctionType *FTy = GetFunctionType(CI, RetTy); if (!FTy) return nullptr; // Create a new function that will be the replicated call. AttributeSet attributes = GetAttributeSet(CI); std::string name = m_lower.GetExtensionName(CI); return cast<Function>( CI->getModule()->getOrInsertFunction(name, FTy, attributes)); } virtual FunctionType *GetFunctionType(CallInst *CI, Type *RetTy) { // Create a new function type with the translated argument. SmallVector<Type *, 10> ParamTypes; ParamTypes.reserve(CI->getNumArgOperands()); for (unsigned i = 0; i < CI->getNumArgOperands(); ++i) { Value *OrigArg = CI->getArgOperand(i); FunctionTypeTranslator::ArgumentType newArgType = m_typeTranslator.TranslateArgumentType(OrigArg); for (int i = 0; i < newArgType.count; ++i) { ParamTypes.push_back(newArgType.type); } } const bool IsVarArg = false; return FunctionType::get(RetTy, ParamTypes, IsVarArg); } AttributeSet GetAttributeSet(CallInst *CI) { Function *F = CI->getCalledFunction(); AttributeSet attributes; auto copyAttribute = [=, &attributes](Attribute::AttrKind a) { if (F->hasFnAttribute(a)) { attributes = attributes.addAttribute(CI->getContext(), AttributeSet::FunctionIndex, a); } }; copyAttribute(Attribute::AttrKind::ReadOnly); copyAttribute(Attribute::AttrKind::ReadNone); copyAttribute(Attribute::AttrKind::ArgMemOnly); copyAttribute(Attribute::AttrKind::NoUnwind); return attributes; } }; /////////////////////////////////////////////////////////////////////////////// // NoTranslation Lowering. 
class NoTranslationTypeTranslator : public FunctionTypeTranslator { virtual Type *TranslateReturnType(CallInst *CI) override { return CI->getType(); } virtual ArgumentType TranslateArgumentType(Value *OrigArg) override { return ArgumentType(OrigArg->getType()); } }; llvm::Value *ExtensionLowering::NoTranslation(CallInst *CI) { Function *NoTranslationFunction = FunctionTranslator::GetLoweredFunction<NoTranslationTypeTranslator>( CI, *this); if (!NoTranslationFunction) return nullptr; IRBuilder<> builder(CI); SmallVector<Value *, 8> args(CI->arg_operands().begin(), CI->arg_operands().end()); return builder.CreateCall(NoTranslationFunction, args); } /////////////////////////////////////////////////////////////////////////////// // Replicated Lowering. enum { NO_COMMON_VECTOR_SIZE = 0x0, }; // Find the vector size that will be used for replication. // The function call will be replicated once for each element of the vector // size. static unsigned GetReplicatedVectorSize(llvm::CallInst *CI) { unsigned commonVectorSize = NO_COMMON_VECTOR_SIZE; Type *RetTy = CI->getType(); if (RetTy->isVectorTy()) commonVectorSize = RetTy->getVectorNumElements(); for (unsigned i = 0; i < CI->getNumArgOperands(); ++i) { Type *Ty = CI->getArgOperand(i)->getType(); if (Ty->isVectorTy()) { unsigned vectorSize = Ty->getVectorNumElements(); if (commonVectorSize != NO_COMMON_VECTOR_SIZE && commonVectorSize != vectorSize) { // Inconsistent vector sizes; need a different strategy. return NO_COMMON_VECTOR_SIZE; } commonVectorSize = vectorSize; } } return commonVectorSize; } class ReplicatedFunctionTypeTranslator : public FunctionTypeTranslator { virtual Type *TranslateReturnType(CallInst *CI) override { unsigned commonVectorSize = GetReplicatedVectorSize(CI); if (commonVectorSize == NO_COMMON_VECTOR_SIZE) return nullptr; // Result should be vector or void. Type *RetTy = CI->getType(); if (!RetTy->isVoidTy() && !RetTy->isVectorTy()) return nullptr; if (RetTy->isVectorTy()) { RetTy = RetTy->getVectorElementType(); } return RetTy; } virtual ArgumentType TranslateArgumentType(Value *OrigArg) override { Type *Ty = OrigArg->getType(); if (Ty->isVectorTy()) { Ty = Ty->getVectorElementType(); } return ArgumentType(Ty); } }; class ReplicateCall { public: ReplicateCall(CallInst *CI, Function &ReplicatedFunction) : m_CI(CI), m_ReplicatedFunction(ReplicatedFunction), m_numReplicatedCalls(GetReplicatedVectorSize(CI)), m_ScalarizeArgIdx(), m_Args(CI->getNumArgOperands()), m_ReplicatedCalls(m_numReplicatedCalls), m_Builder(CI) { assert(m_numReplicatedCalls != NO_COMMON_VECTOR_SIZE); } Value *Generate() { CollectReplicatedArguments(); CreateReplicatedCalls(); Value *retVal = GetReturnValue(); return retVal; } private: CallInst *m_CI; Function &m_ReplicatedFunction; unsigned m_numReplicatedCalls; SmallVector<unsigned, 10> m_ScalarizeArgIdx; SmallVector<Value *, 10> m_Args; SmallVector<Value *, 10> m_ReplicatedCalls; IRBuilder<> m_Builder; // Collect replicated arguments. // For non-vector arguments we can add them to the args list directly. // These args will be shared by each replicated call. For the vector // arguments we remember the position it will go in the argument list. // We will fill in the vector args below when we replicate the call // (once for each vector lane). 
void CollectReplicatedArguments() { for (unsigned i = 0; i < m_CI->getNumArgOperands(); ++i) { Type *Ty = m_CI->getArgOperand(i)->getType(); if (Ty->isVectorTy()) { m_ScalarizeArgIdx.push_back(i); } else { m_Args[i] = m_CI->getArgOperand(i); } } } // Create replicated calls. // Replicate the call once for each element of the replicated vector size. void CreateReplicatedCalls() { for (unsigned vecIdx = 0; vecIdx < m_numReplicatedCalls; vecIdx++) { for (unsigned i = 0, e = m_ScalarizeArgIdx.size(); i < e; ++i) { unsigned argIdx = m_ScalarizeArgIdx[i]; Value *arg = m_CI->getArgOperand(argIdx); m_Args[argIdx] = m_Builder.CreateExtractElement(arg, vecIdx); } Value *EltOP = m_Builder.CreateCall(&m_ReplicatedFunction, m_Args); m_ReplicatedCalls[vecIdx] = EltOP; } } // Get the final replicated value. // If the function is a void type then return (arbitrarily) the first call. // We do not return nullptr because that indicates a failure to replicate. // If the function is a vector type then aggregate all of the replicated // call values into a new vector. Value *GetReturnValue() { if (m_CI->getType()->isVoidTy()) return m_ReplicatedCalls.back(); Value *retVal = llvm::UndefValue::get(m_CI->getType()); for (unsigned i = 0; i < m_ReplicatedCalls.size(); ++i) retVal = m_Builder.CreateInsertElement(retVal, m_ReplicatedCalls[i], i); return retVal; } }; // Translate the HL call by replicating the call for each vector element. // // For example, // // <2xi32> %r = call @ext.foo(i32 %op, <2xi32> %v) // ==> // %r.1 = call @ext.foo.s(i32 %op, i32 %v.1) // %r.2 = call @ext.foo.s(i32 %op, i32 %v.2) // <2xi32> %r.v.1 = insertelement %r.1, 0, <2xi32> undef // <2xi32> %r.v.2 = insertelement %r.2, 1, %r.v.1 // // You can then RAWU %r with %r.v.2. The RAWU is not done by the translate // function. 
Value *ExtensionLowering::Replicate(CallInst *CI) { Function *ReplicatedFunction = FunctionTranslator::GetLoweredFunction<ReplicatedFunctionTypeTranslator>( CI, *this); if (!ReplicatedFunction) return NoTranslation(CI); ReplicateCall replicate(CI, *ReplicatedFunction); return replicate.Generate(); } /////////////////////////////////////////////////////////////////////////////// // Helper functions static VectorType *ConvertStructTypeToVectorType(Type *structTy) { assert(structTy->isStructTy()); return VectorType::get(structTy->getStructElementType(0), structTy->getStructNumElements()); } static Value *PackStructIntoVector(IRBuilder<> &builder, Value *strukt) { Type *vecTy = ConvertStructTypeToVectorType(strukt->getType()); Value *packed = UndefValue::get(vecTy); unsigned numElements = vecTy->getVectorNumElements(); for (unsigned i = 0; i < numElements; ++i) { Value *element = builder.CreateExtractValue(strukt, i); packed = builder.CreateInsertElement(packed, element, i); } return packed; } static StructType *ConvertVectorTypeToStructType(Type *vecTy) { assert(vecTy->isVectorTy()); Type *elementTy = vecTy->getVectorElementType(); unsigned numElements = vecTy->getVectorNumElements(); SmallVector<Type *, 4> elements; for (unsigned i = 0; i < numElements; ++i) elements.push_back(elementTy); return StructType::get(vecTy->getContext(), elements); } static Value *PackVectorIntoStruct(IRBuilder<> &builder, Value *vec) { StructType *structTy = ConvertVectorTypeToStructType(vec->getType()); Value *packed = UndefValue::get(structTy); unsigned numElements = structTy->getStructNumElements(); for (unsigned i = 0; i < numElements; ++i) { Value *element = builder.CreateExtractElement(vec, i); packed = builder.CreateInsertValue(packed, element, {i}); } return packed; } /////////////////////////////////////////////////////////////////////////////// // Packed Lowering. class PackCall { public: PackCall(CallInst *CI, Function &PackedFunction) : m_CI(CI), m_packedFunction(PackedFunction), m_builder(CI) {} Value *Generate() { SmallVector<Value *, 10> args; PackArgs(args); Value *result = CreateCall(args); return UnpackResult(result); } private: CallInst *m_CI; Function &m_packedFunction; IRBuilder<> m_builder; void PackArgs(SmallVectorImpl<Value *> &args) { args.clear(); for (Value *arg : m_CI->arg_operands()) { if (arg->getType()->isVectorTy()) arg = PackVectorIntoStruct(m_builder, arg); args.push_back(arg); } } Value *CreateCall(const SmallVectorImpl<Value *> &args) { return m_builder.CreateCall(&m_packedFunction, args); } Value *UnpackResult(Value *result) { if (result->getType()->isStructTy()) { result = PackStructIntoVector(m_builder, result); } return result; } }; class PackedFunctionTypeTranslator : public FunctionTypeTranslator { virtual Type *TranslateReturnType(CallInst *CI) override { return TranslateIfVector(CI->getType()); } virtual ArgumentType TranslateArgumentType(Value *OrigArg) override { return ArgumentType(TranslateIfVector(OrigArg->getType())); } Type *TranslateIfVector(Type *ty) { if (ty->isVectorTy()) ty = ConvertVectorTypeToStructType(ty); return ty; } }; Value *ExtensionLowering::Pack(CallInst *CI) { Function *PackedFunction = FunctionTranslator::GetLoweredFunction<PackedFunctionTypeTranslator>( CI, *this); if (!PackedFunction) return NoTranslation(CI); PackCall pack(CI, *PackedFunction); Value *result = pack.Generate(); return result; } /////////////////////////////////////////////////////////////////////////////// // Resource Lowering. // Modify a call to a resource method. 
// Makes the following transformation: // // 1. Convert non-void return value to dx.types.ResRet. // 2. Expand vectors in place as separate arguments. // // Example // ----------------------------------------------------------------------------- // // %0 = call <2 x float> MyBufferOp(i32 138, %class.Buffer %3, <2 x i32> <1 , // 2> ) %r = call %dx.types.ResRet.f32 MyBufferOp(i32 138, %dx.types.Handle // %buf, i32 1, i32 2 ) %x = extractvalue %r, 0 %y = extractvalue %r, 1 %v = <2 // x float> undef %v.1 = insertelement %v, %x, 0 %v.2 = insertelement %v.1, // %y, 1 class ResourceMethodCall { public: ResourceMethodCall(CallInst *CI) : m_CI(CI), m_builder(CI) {} virtual ~ResourceMethodCall() {} virtual Value *Generate(Function *explodedFunction) { SmallVector<Value *, 16> args; ExplodeArgs(args); Value *result = CreateCall(explodedFunction, args); result = ConvertResult(result); return result; } protected: CallInst *m_CI; IRBuilder<> m_builder; void ExplodeArgs(SmallVectorImpl<Value *> &args) { for (Value *arg : m_CI->arg_operands()) { // vector arg: <N x ty> -> ty, ty, ..., ty (N times) if (arg->getType()->isVectorTy()) { for (unsigned i = 0; i < arg->getType()->getVectorNumElements(); i++) { Value *xarg = m_builder.CreateExtractElement(arg, i); args.push_back(xarg); } } // any other value: arg -> arg else { args.push_back(arg); } } } Value *CreateCall(Function *explodedFunction, ArrayRef<Value *> args) { return m_builder.CreateCall(explodedFunction, args); } Value *ConvertResult(Value *result) { Type *origRetTy = m_CI->getType(); if (origRetTy->isVoidTy()) return ConvertVoidResult(result); else if (origRetTy->isVectorTy()) return ConvertVectorResult(origRetTy, result); else return ConvertScalarResult(origRetTy, result); } // Void result does not need any conversion. Value *ConvertVoidResult(Value *result) { return result; } // Vector result will be populated with the elements from the resource return. Value *ConvertVectorResult(Type *origRetTy, Value *result) { Type *resourceRetTy = result->getType(); assert(origRetTy->isVectorTy()); assert(resourceRetTy->isStructTy() && "expected resource return type to be a struct"); const unsigned vectorSize = origRetTy->getVectorNumElements(); const unsigned structSize = resourceRetTy->getStructNumElements(); const unsigned size = std::min(vectorSize, structSize); assert(vectorSize < structSize); // Copy resource struct elements to vector. Value *vector = UndefValue::get(origRetTy); for (unsigned i = 0; i < size; ++i) { Value *element = m_builder.CreateExtractValue(result, {i}); vector = m_builder.CreateInsertElement(vector, element, i); } return vector; } // Scalar result will be populated with the first element of the resource // return. Value *ConvertScalarResult(Type *origRetTy, Value *result) { assert(origRetTy->isSingleValueType()); return m_builder.CreateExtractValue(result, {0}); } }; // Translate function return and argument types for resource method lowering. 
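// For example (a sketch, not tied to any particular extension): a method
// returning <2 x float> with a <2 x i32> coordinate argument is translated
// to a function returning %dx.types.ResRet.f32 whose coordinate becomes two
// scalar i32 parameters, one per vector lane.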
class ResourceFunctionTypeTranslator : public FunctionTypeTranslator { public: ResourceFunctionTypeTranslator(OP &hlslOp) : m_hlslOp(hlslOp) {} // Translate return type as follows: // // void -> void // <N x ty> -> dx.types.ResRet.ty // ty -> dx.types.ResRet.ty virtual Type *TranslateReturnType(CallInst *CI) override { Type *RetTy = CI->getType(); if (RetTy->isVoidTy()) return RetTy; else if (RetTy->isVectorTy()) RetTy = RetTy->getVectorElementType(); return m_hlslOp.GetResRetType(RetTy); } // Translate argument type as follows: // // resource -> dx.types.Handle // <N x ty> -> { ty, N } // ty -> { ty, 1 } virtual ArgumentType TranslateArgumentType(Value *OrigArg) override { int count = 1; Type *ty = OrigArg->getType(); if (ty->isVectorTy()) { count = ty->getVectorNumElements(); ty = ty->getVectorElementType(); } return ArgumentType(ty, count); } private: OP &m_hlslOp; }; Value *ExtensionLowering::Resource(CallInst *CI) { // Extra strategy info overrides the default lowering for resource methods. if (!m_extraStrategyInfo.empty()) { return CustomResource(CI); } ResourceFunctionTypeTranslator resourceTypeTranslator(m_hlslOp); Function *resourceFunction = FunctionTranslator::GetLoweredFunction(resourceTypeTranslator, CI, *this); if (!resourceFunction) return NoTranslation(CI); ResourceMethodCall explode(CI); Value *result = explode.Generate(resourceFunction); return result; } // This class handles the core logic for custom lowering of resource // method intrinsics. The goal is to allow resource extension intrinsics // to be handled the same way as the core hlsl resource intrinsics. // // Specifically, we want to support: // // 1. Multiple hlsl overloads map to a single dxil intrinsic // 2. The hlsl overloads can take different parameters for a given resource // type // 3. The hlsl overloads are not consistent across different resource types // // To achieve these goals we need a more complex mechanism for describing how // to translate the high-level arguments to arguments for a dxil function. // The custom lowering info describes this lowering using the following format. // // [Custom Lowering Info Format] // A json string encoding a map where each key is either a specific resource // type or the keyword "default" to be used for any other resource. The value is // a custom-format string encoding how high-level arguments are mapped to dxil // intrinsic arguments. // // [Argument Translation Format] // A comma separated string where the number of fields is exactly equal to the // number of parameters in the target dxil intrinsic. Each field describes how // to generate the argument for that dxil intrinsic parameter. It has the // following format where the hl_arg_index is mandatory, but the other two parts // are optional. 
// // <hl_arg_index>.<vector_index>:<optional_type_info> // // The format is precisely described by the following regular expression: // // (?P<hl_arg_index>[-0-9]+)(.(?P<vector_index>[-0-9]+))?(:(?P<optional_type_info>\?i32|\?i16|\?i8|\?float|\?half|\?i1))?$ // // Example // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Say we want to define the MyTextureOp extension with the following overloads: // // Texture1D // MyTextureOp(uint addr, uint offset) // MyTextureOp(uint addr, uint offset, uint val) // // Texture2D // MyTextureOp(uint2 addr, uint2 val) // // And a dxil intrinsic defined as follows // @MyTextureOp(i32 opcode, %dx.types.Handle handle, i32 addr0, i32 addr1, i32 // offset, i32 val0, i32 val1) // // Then we would define the lowering info json as follows // // { // "default" : "0, 1, 2.0, 2.1, 3 , 4.0:?i32, 4.1:?i32", // "Texture2D" : "0, 1, 2.0, 2.1, -1:?i32, 3.0 , 3.1" // } // // // This would produce the following lowerings (assuming the MyTextureOp opcode // is 17) // // hlsl: Texture1D.MyTextureOp(a, b) // hl: @MyTextureOp(17, handle, a, b) // dxil: @MyTextureOp(17, handle, a, undef, b, undef, undef) // // hlsl: Texture1D.MyTextureOp(a, b, c) // hl: @MyTextureOp(17, handle, a, b, c) // dxil: @MyTextureOp(17, handle, a, undef, b, c, undef) // // hlsl: Texture2D.MyTextureOp(a, c) // hl: @MyTextureOp(17, handle, a, c) // dxil: @MyTextureOp(17, handle, a.x, a.y, undef, c.x, c.y) // // class CustomLowering { public: CustomLowering(StringRef LoweringInfo, CallInst *CI) { // Parse lowering info json format. std::map<ResourceKindName, std::vector<DxilArgInfo>> LoweringInfoMap = ParseLoweringInfo(LoweringInfo, CI->getContext()); // Find the default lowering kind std::vector<DxilArgInfo> *pArgInfo = nullptr; if (LoweringInfoMap.count(m_DefaultInfoName)) { pArgInfo = &LoweringInfoMap.at(m_DefaultInfoName); } else { ThrowExtensionError("Unable to find lowering info for custom function"); } // Don't explode vectors for custom functions GenerateLoweredArgs(CI, *pArgInfo); } CustomLowering(StringRef LoweringInfo, CallInst *CI, HLResourceLookup &ResourceLookup) { // Parse lowering info json format. std::map<ResourceKindName, std::vector<DxilArgInfo>> LoweringInfoMap = ParseLoweringInfo(LoweringInfo, CI->getContext()); // Lookup resource kind based on handle (first arg after hl opcode) enum { RESOURCE_HANDLE_ARG = 1 }; const char *pName = nullptr; if (!ResourceLookup.GetResourceKindName( CI->getArgOperand(RESOURCE_HANDLE_ARG), &pName)) { ThrowExtensionError("Failed to find resource from handle"); } std::string Name(pName); // Select lowering info to use based on resource kind. std::vector<DxilArgInfo> *pArgInfo = nullptr; if (LoweringInfoMap.count(Name)) { pArgInfo = &LoweringInfoMap.at(Name); } else if (LoweringInfoMap.count(m_DefaultInfoName)) { pArgInfo = &LoweringInfoMap.at(m_DefaultInfoName); } else { ThrowExtensionError("Unable to find lowering info for resource"); } GenerateLoweredArgs(CI, *pArgInfo); } const std::vector<Value *> &GetLoweredArgs() const { return m_LoweredArgs; } private: struct OptionalTypeSpec { const char *TypeName; Type *LLVMType; }; // These are the supported optional types for generating dxil parameters // that have no matching argument in the high-level intrinsic overload. // See [Argument Translation Format] for details. void InitOptionalTypes(LLVMContext &Ctx) { // Table of supported optional types. // Keep in sync with m_OptionalTypes small vector size to avoid // dynamic allocation. 
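// For example, an [Argument Translation Format] field such as "-1:?i32"
// matches no high-level argument, so GenerateLoweredArgs below passes an
// undef i32 for that dxil parameter.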
OptionalTypeSpec OptionalTypes[] = { {"?i32", Type::getInt32Ty(Ctx)}, {"?float", Type::getFloatTy(Ctx)}, {"?half", Type::getHalfTy(Ctx)}, {"?i8", Type::getInt8Ty(Ctx)}, {"?i16", Type::getInt16Ty(Ctx)}, {"?i1", Type::getInt1Ty(Ctx)}, }; DXASSERT(m_OptionalTypes.empty(), "Init should only be called once"); m_OptionalTypes.clear(); m_OptionalTypes.reserve(_countof(OptionalTypes)); for (const OptionalTypeSpec &T : OptionalTypes) { m_OptionalTypes.push_back(T); } } Type *ParseOptionalType(StringRef OptionalTypeInfo) { if (OptionalTypeInfo.empty()) { return nullptr; } for (OptionalTypeSpec &O : m_OptionalTypes) { if (OptionalTypeInfo == O.TypeName) { return O.LLVMType; } } ThrowExtensionError("Failed to parse optional type"); } // Mapping from high level function arg to dxil function arg. // // The `HighLevelArgIndex` is the index of the function argument to // which this dxil argument maps. // // If `HasVectorIndex` is true then the `VectorIndex` contains the // index of the element in the vector pointed to by HighLevelArgIndex. // // The `OptionalType` is used to specify types for arguments that are not // present in all overloads of the high level function. This lets us // map multiple high level functions to a single dxil extension intrinsic. // struct DxilArgInfo { unsigned HighLevelArgIndex = 0; unsigned VectorIndex = 0; bool HasVectorIndex = false; Type *OptionalType = nullptr; }; typedef std::string ResourceKindName; // Convert the lowering info to a machine-friendly format. // Note that we use the YAML parser to parse the JSON since JSON // is a subset of YAML (and this version of llvm has no JSON parser). // // See [Custom Lowering Info Format] for details. std::map<ResourceKindName, std::vector<DxilArgInfo>> ParseLoweringInfo(StringRef LoweringInfo, LLVMContext &Ctx) { InitOptionalTypes(Ctx); std::map<ResourceKindName, std::vector<DxilArgInfo>> LoweringInfoMap; SourceMgr SM; yaml::Stream YAMLStream(LoweringInfo, SM); // Make sure we have a valid json input. llvm::yaml::document_iterator I = YAMLStream.begin(); if (I == YAMLStream.end()) { ThrowExtensionError("Found empty resource lowering JSON."); } llvm::yaml::Node *Root = I->getRoot(); if (!Root) { ThrowExtensionError("Error parsing resource lowering JSON."); } // Parse the top level map object. llvm::yaml::MappingNode *Object = dyn_cast<llvm::yaml::MappingNode>(Root); if (!Object) { ThrowExtensionError( "Expected map in top level of resource lowering JSON."); } // Parse all key/value pairs from the map. for (llvm::yaml::MappingNode::iterator KVI = Object->begin(), KVE = Object->end(); KVI != KVE; ++KVI) { // Parse key. llvm::yaml::ScalarNode *KeyString = dyn_cast_or_null<llvm::yaml::ScalarNode>((*KVI).getKey()); if (!KeyString) { ThrowExtensionError( "Expected string as key in resource lowering info JSON map."); } SmallString<32> KeyStorage; StringRef Key = KeyString->getValue(KeyStorage); // Parse value. llvm::yaml::ScalarNode *ValueString = dyn_cast_or_null<llvm::yaml::ScalarNode>((*KVI).getValue()); if (!ValueString) { ThrowExtensionError( "Expected string as value in resource lowering info JSON map."); } SmallString<128> ValueStorage; StringRef Value = ValueString->getValue(ValueStorage); // Parse dxil arg info from value. LoweringInfoMap[Key] = ParseDxilArgInfo(Value, Ctx); } return LoweringInfoMap; } // Parse the dxil argument translation info. // See [Argument Translation Format] for details. 
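// For example, the field "2.1:?i32" parses to HighLevelArgIndex = 2,
// HasVectorIndex = true, VectorIndex = 1, and OptionalType = i32.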
std::vector<DxilArgInfo> ParseDxilArgInfo(StringRef ArgSpec, LLVMContext &Ctx) { std::vector<DxilArgInfo> Args; SmallVector<StringRef, 14> Splits; ArgSpec.split(Splits, ","); for (const StringRef &Split : Splits) { StringRef Field = Split.trim(); StringRef HighLevelArgInfo; StringRef OptionalTypeInfo; std::tie(HighLevelArgInfo, OptionalTypeInfo) = Field.split(":"); Type *OptionalType = ParseOptionalType(OptionalTypeInfo); StringRef HighLevelArgIndex; StringRef VectorIndex; std::tie(HighLevelArgIndex, VectorIndex) = HighLevelArgInfo.split("."); // Parse the arg and vector index. // Parse the values as signed integers, but store them as unsigned values // to allow using -1 as a shorthand for the max value. DxilArgInfo ArgInfo; ArgInfo.HighLevelArgIndex = static_cast<unsigned>(std::stoi(HighLevelArgIndex)); if (!VectorIndex.empty()) { ArgInfo.HasVectorIndex = true; ArgInfo.VectorIndex = static_cast<unsigned>(std::stoi(VectorIndex)); } ArgInfo.OptionalType = OptionalType; Args.push_back(ArgInfo); } return Args; } // Create the dxil args based on custom lowering info. void GenerateLoweredArgs(CallInst *CI, const std::vector<DxilArgInfo> &ArgInfoRecords) { IRBuilder<> builder(CI); for (const DxilArgInfo &ArgInfo : ArgInfoRecords) { // Check to see if we have the corresponding high-level arg in the // overload for this call. if (ArgInfo.HighLevelArgIndex < CI->getNumArgOperands()) { Value *Arg = CI->getArgOperand(ArgInfo.HighLevelArgIndex); if (ArgInfo.HasVectorIndex) { // We expect a vector type here, but we handle one special case if // not. if (Arg->getType()->isVectorTy()) { // We allow multiple high-level overloads to map to a single dxil // extension function. If the vector index is invalid for this // specific overload then use an undef value as a replacement. if (ArgInfo.VectorIndex < Arg->getType()->getVectorNumElements()) { Arg = builder.CreateExtractElement(Arg, ArgInfo.VectorIndex); } else { Arg = UndefValue::get(Arg->getType()->getVectorElementType()); } } else { // If it is a non-vector type then we replace non-zero vector index // with undef. This is to handle hlsl intrinsic overloading rules // that allow scalars in place of single-element vectors. We assume // here that a non-vector means that a single element vector was // already scalarized. // if (ArgInfo.VectorIndex > 0) { Arg = UndefValue::get(Arg->getType()); } } } else { // If the vector isn't exploded, use structs for DXIL Intrinsics if (Arg->getType()->isVectorTy()) { Arg = PackVectorIntoStruct(builder, Arg); } } m_LoweredArgs.push_back(Arg); } else if (ArgInfo.OptionalType) { // If there was no matching high-level arg then we look for the optional // arg type specified by the lowering info. m_LoweredArgs.push_back(UndefValue::get(ArgInfo.OptionalType)); } else { // No way to know how to generate the correct type for this dxil arg. ThrowExtensionError("Unable to map high-level arg to dxil arg"); } } } std::vector<Value *> m_LoweredArgs; SmallVector<OptionalTypeSpec, 6> m_OptionalTypes; const char *m_DefaultInfoName = "default"; }; // Boilerplate to reuse existing logic as much as possible. // We just want to overload GetFunctionType here. 
class CustomFunctionTranslator : public FunctionTranslator { public: static Function *GetLoweredFunction(const CustomLowering &CustomLowering, FunctionTypeTranslator &typeTranslator, CallInst *CI, ExtensionLowering &lower) { CustomFunctionTranslator T(CustomLowering, typeTranslator, lower); return T.FunctionTranslator::GetLoweredFunction(CI); } private: CustomFunctionTranslator(const CustomLowering &CustomLowering, FunctionTypeTranslator &typeTranslator, ExtensionLowering &lower) : FunctionTranslator(typeTranslator, lower), m_CustomLowering(CustomLowering) {} virtual FunctionType *GetFunctionType(CallInst *CI, Type *RetTy) override { SmallVector<Type *, 16> ParamTypes; for (Value *V : m_CustomLowering.GetLoweredArgs()) { ParamTypes.push_back(V->getType()); } const bool IsVarArg = false; return FunctionType::get(RetTy, ParamTypes, IsVarArg); } private: const CustomLowering &m_CustomLowering; }; // Boilerplate to reuse existing logic as much as possible. // We just want to overload Generate here. class CustomResourceMethodCall : public ResourceMethodCall { public: CustomResourceMethodCall(CallInst *CI, const CustomLowering &CustomLowering) : ResourceMethodCall(CI), m_CustomLowering(CustomLowering) {} virtual Value *Generate(Function *loweredFunction) override { Value *result = CreateCall(loweredFunction, m_CustomLowering.GetLoweredArgs()); result = ConvertResult(result); return result; } private: const CustomLowering &m_CustomLowering; }; // Support custom lowering logic for resource functions. Value *ExtensionLowering::CustomResource(CallInst *CI) { CustomLowering CustomLowering(m_extraStrategyInfo, CI, m_hlResourceLookup); ResourceFunctionTypeTranslator ResourceTypeTranslator(m_hlslOp); Function *ResourceFunction = CustomFunctionTranslator::GetLoweredFunction( CustomLowering, ResourceTypeTranslator, CI, *this); if (!ResourceFunction) return NoTranslation(CI); CustomResourceMethodCall custom(CI, CustomLowering); Value *Result = custom.Generate(ResourceFunction); return Result; } // Support custom lowering logic for arbitrary functions. Value *ExtensionLowering::Custom(CallInst *CI) { CustomLowering CustomLowering(m_extraStrategyInfo, CI); PackedFunctionTypeTranslator TypeTranslator; Function *CustomFunction = CustomFunctionTranslator::GetLoweredFunction( CustomLowering, TypeTranslator, CI, *this); if (!CustomFunction) return NoTranslation(CI); IRBuilder<> builder(CI); Value *result = builder.CreateCall(CustomFunction, CustomLowering.GetLoweredArgs()); // Arbitrary functions will expect vectors, not structs if (CustomFunction->getReturnType()->isStructTy()) { return PackStructIntoVector(builder, result); } return result; } /////////////////////////////////////////////////////////////////////////////// // Dxil Lowering. Value *ExtensionLowering::Dxil(CallInst *CI) { // Map the extension opcode to the corresponding dxil opcode. unsigned extOpcode = GetHLOpcode(CI); OP::OpCode dxilOpcode; if (!m_helper->GetDxilOpcode(extOpcode, dxilOpcode)) return nullptr; // Find the dxil function based on the overload type. Type *overloadTy = OP::GetOverloadType(dxilOpcode, CI->getCalledFunction()); Function *F = m_hlslOp.GetOpFunc(dxilOpcode, overloadTy->getScalarType()); // Update the opcode in the original call so we can just copy it below. // We are about to delete this call anyway. CI->setOperand(0, m_hlslOp.GetI32Const(static_cast<unsigned>(dxilOpcode))); // Create the new call. 
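// Vector overloads are lowered by replicating the call once per element
// (reusing ReplicateCall above); scalar overloads become a single call that
// forwards the original operands.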
Value *result = nullptr; if (overloadTy->isVectorTy()) { ReplicateCall replicate(CI, *F); result = replicate.Generate(); } else { IRBuilder<> builder(CI); SmallVector<Value *, 8> args(CI->arg_operands().begin(), CI->arg_operands().end()); result = builder.CreateCall(F, args); } return result; } /////////////////////////////////////////////////////////////////////////////// // Computing Extension Names. // Compute the name to use for the intrinsic function call once it is lowered to // dxil. First checks to see if we have a custom name from the codegen helper // and if not chooses a default name based on the lowering strategy. class ExtensionName { public: ExtensionName(CallInst *CI, ExtensionLowering::Strategy strategy, HLSLExtensionsCodegenHelper *helper) : m_CI(CI), m_strategy(strategy), m_helper(helper) {} std::string Get() { std::string name; if (m_helper) name = GetCustomExtensionName(m_CI, *m_helper); if (!HasCustomExtensionName(name)) name = GetDefaultCustomExtensionName( m_CI, ExtensionLowering::GetStrategyName(m_strategy)); return name; } private: CallInst *m_CI; ExtensionLowering::Strategy m_strategy; HLSLExtensionsCodegenHelper *m_helper; static std::string GetCustomExtensionName(CallInst *CI, HLSLExtensionsCodegenHelper &helper) { unsigned opcode = GetHLOpcode(CI); std::string name = helper.GetIntrinsicName(opcode); ReplaceOverloadMarkerWithTypeName(name, CI); return name; } static std::string GetDefaultCustomExtensionName(CallInst *CI, StringRef strategyName) { return (Twine(CI->getCalledFunction()->getName()) + "." + Twine(strategyName)) .str(); } static bool HasCustomExtensionName(const std::string &name) { return name.size() > 0; } typedef unsigned OverloadArgIndex; static constexpr OverloadArgIndex DefaultOverloadIndex = std::numeric_limits<OverloadArgIndex>::max(); // Choose the (return value or argument) type that determines the overload // type for the intrinsic call. If the overload arg index was explicitly // specified (see ParseOverloadArgIndex) then we use that arg to pick the // overload name. Otherwise we pick a default where we take the return type as // the overload. If the return is void we take the first (non-opcode) argument // as the overload type. static Type *SelectOverloadSlot(CallInst *CI, OverloadArgIndex ArgIndex) { if (ArgIndex != DefaultOverloadIndex) { return CI->getArgOperand(ArgIndex)->getType(); } Type *ty = CI->getType(); if (ty->isVoidTy()) { if (CI->getNumArgOperands() > 1) ty = CI->getArgOperand(1)->getType(); // First non-opcode argument. } return ty; } static Type *GetOverloadType(CallInst *CI, OverloadArgIndex ArgIndex) { Type *ty = SelectOverloadSlot(CI, ArgIndex); if (ty->isVectorTy()) ty = ty->getVectorElementType(); return ty; } static std::string GetTypeName(Type *ty) { std::string typeName; llvm::raw_string_ostream os(typeName); ty->print(os); os.flush(); return typeName; } static std::string GetOverloadTypeName(CallInst *CI, OverloadArgIndex ArgIndex) { Type *ty = GetOverloadType(CI, ArgIndex); return GetTypeName(ty); } // Parse the arg index out of the overload marker (if any). // // The function names use a $o to indicate that the function is overloaded // and we should replace $o with the overload type. The extension name can // explicitly set which arg to use for the overload type by adding a colon // and a number after the $o (e.g. $o:3 would say the overload type is // determined by parameter 3). 
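// For example (a sketch with a hypothetical extension name): applying
// "MyExt.$o:2" to
//   call void @MyExt(i32 7, <2 x float> %a, i32 %b)
// selects argument 2 (i32 %b), so the lowered name becomes "MyExt.i32".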
// // If we find an arg index after the overload marker we update the size // of the marker to include the full parsed string size so that it can // be replaced with the selected overload type. // static OverloadArgIndex ParseOverloadArgIndex(const std::string &functionName, size_t OverloadMarkerStartIndex, size_t *pOverloadMarkerSize) { assert(OverloadMarkerStartIndex != std::string::npos); size_t StartIndex = OverloadMarkerStartIndex + *pOverloadMarkerSize; // Check if we have anything after the overload marker to parse. if (StartIndex >= functionName.size()) { return DefaultOverloadIndex; } // Does it start with a ':' ? if (functionName[StartIndex] != ':') { return DefaultOverloadIndex; } // Skip past the : ++StartIndex; // Collect all the digits. std::string Digits; Digits.reserve(functionName.size() - StartIndex); for (size_t i = StartIndex; i < functionName.size(); ++i) { char c = functionName[i]; if (!isdigit(c)) { break; } Digits.push_back(c); } if (Digits.empty()) { return DefaultOverloadIndex; } *pOverloadMarkerSize = *pOverloadMarkerSize + std::strlen(":") + Digits.size(); return std::stoi(Digits); } // Find the occurrence of the overload marker $o and replace it with the // overload type name. static void ReplaceOverloadMarkerWithTypeName(std::string &functionName, CallInst *CI) { const char *OverloadMarker = "$o"; size_t OverloadMarkerLength = 2; size_t pos = functionName.find(OverloadMarker); if (pos != std::string::npos) { OverloadArgIndex ArgIndex = ParseOverloadArgIndex(functionName, pos, &OverloadMarkerLength); std::string typeName = GetOverloadTypeName(CI, ArgIndex); functionName.replace(pos, OverloadMarkerLength, typeName); } } }; std::string ExtensionLowering::GetExtensionName(llvm::CallInst *CI) { ExtensionName name(CI, m_strategy, m_helper); return name.Get(); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilLegalizeSampleOffsetPass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilLegalizeSampleOffsetPass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // DxilLegalizeSampleOffsetPass implementation. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include <unordered_set> using std::unique_ptr; using std::vector; using namespace llvm; using namespace hlsl; /////////////////////////////////////////////////////////////////////////////// // Legalize Sample offset. namespace { // record of the offset value and the call that uses it // Used mainly for error detection and reporting struct Offset { Value *offset; CallInst *call; }; // When optimizations are disabled, try to legalize sample offset. class DxilLegalizeSampleOffsetPass : public FunctionPass { LoopInfo LI; public: static char ID; // Pass identification, replacement for typeid explicit DxilLegalizeSampleOffsetPass() : FunctionPass(ID) {} StringRef getPassName() const override { return "DXIL legalize sample offset"; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DxilValueCache>(); AU.setPreservesAll(); } bool runOnFunction(Function &F) override { DxilModule &DM = F.getParent()->GetOrCreateDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); std::vector<Offset> illegalOffsets; CollectIllegalOffsets(illegalOffsets, F, hlslOP); if (illegalOffsets.empty()) return false; // Unroll loops if an offset appears inside a loop. TryUnrollLoop(illegalOffsets, F); // Collect offset again after mem2reg. std::vector<Offset> ssaIllegalOffsets; CollectIllegalOffsets(ssaIllegalOffsets, F, hlslOP); // Run simple optimization to legalize offsets. 
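// (DxilValueCache below replaces offsets that fold to constants once loops
// are unrolled and allocas promoted. For example, a minimal HLSL sketch of
// a pattern this legalizes:
//   for (int i = 0; i < 4; i++) c += tex.Sample(samp, uv, int2(i, 0));
// after unrolling, each offset folds to an immediate in the [-8, 7] range.)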
LegalizeOffsets(ssaIllegalOffsets); // If SM 6.7 or higher, permit remaining "illegal" offsets if (DM.GetShaderModel()->IsSM67Plus()) return true; FinalCheck(F, hlslOP); return true; } private: void TryUnrollLoop(std::vector<Offset> &illegalOffsets, Function &F); void CollectIllegalOffsets(std::vector<Offset> &illegalOffsets, Function &F, hlsl::OP *hlslOP); void CollectIllegalOffsets(std::vector<Offset> &illegalOffsets, Function &F, DXIL::OpCode opcode, hlsl::OP *hlslOP); void LegalizeOffsets(const std::vector<Offset> &illegalOffsets); void FinalCheck(Function &F, hlsl::OP *hlslOP); }; char DxilLegalizeSampleOffsetPass::ID = 0; bool HasIllegalOffsetInLoop(std::vector<Offset> &illegalOffsets, LoopInfo &LI, Function &F) { DominatorTreeAnalysis DTA; DominatorTree DT = DTA.run(F); LI.Analyze(DT); bool findOffset = false; for (auto it : illegalOffsets) { if (const Instruction *I = dyn_cast<Instruction>(it.offset)) { const BasicBlock *BB = I->getParent(); // TODO: determine whether values are actually loop dependent, not just in // a loop if (LI.getLoopFor(BB)) { findOffset = true; break; } } } return findOffset; } void GetOffsetRange(DXIL::OpCode opcode, unsigned &offsetStart, unsigned &offsetEnd) { if (DXIL::OpCode::TextureLoad == opcode) { offsetStart = DXIL::OperandIndex::kTextureLoadOffset0OpIdx; offsetEnd = DXIL::OperandIndex::kTextureLoadOffset2OpIdx; } else { // assume samples offsetStart = DXIL::OperandIndex::kTextureSampleOffset0OpIdx; offsetEnd = DXIL::OperandIndex::kTextureSampleOffset2OpIdx; } } void CollectIllegalOffset(CallInst *CI, DXIL::OpCode opcode, std::vector<Offset> &illegalOffsets) { unsigned offsetStart = 0, offsetEnd = 0; GetOffsetRange(opcode, offsetStart, offsetEnd); Value *offset0 = CI->getArgOperand(offsetStart); // No offsets if (isa<UndefValue>(offset0)) return; for (unsigned i = offsetStart; i <= offsetEnd; i++) { Value *offset = CI->getArgOperand(i); if (Instruction *I = dyn_cast<Instruction>(offset)) { Offset offset = {I, CI}; illegalOffsets.emplace_back(offset); } else if (ConstantInt *cOffset = dyn_cast<ConstantInt>(offset)) { int64_t val = cOffset->getValue().getSExtValue(); if (val > 7 || val < -8) { Offset offset = {cOffset, CI}; illegalOffsets.emplace_back(offset); } } } } } // namespace // Return true if the call instructions in a and b are the same bool InstEq(const Offset &a, const Offset &b) { return a.call == b.call; } // Return true if the call instruction in a comes before that in b bool InstLT(const Offset &a, const Offset &b) { DebugLoc aLoc = a.call->getDebugLoc(); DebugLoc bLoc = b.call->getDebugLoc(); if (aLoc && bLoc) { DIScope *aScope = cast<DIScope>(aLoc->getRawScope()); DIScope *bScope = cast<DIScope>(bLoc->getRawScope()); std::string aFile = aScope->getFilename(); std::string bFile = bScope->getFilename(); return aFile < bFile || (aFile == bFile && aLoc.getLine() < bLoc.getLine()); } // No line numbers, just compare pointers so that matching instructions will // be adjacent return a.call < b.call; } void DxilLegalizeSampleOffsetPass::FinalCheck(Function &F, hlsl::OP *hlslOP) { // Collect offset to make sure no illegal offsets. std::vector<Offset> finalIllegalOffsets; CollectIllegalOffsets(finalIllegalOffsets, F, hlslOP); if (!finalIllegalOffsets.empty()) { std::string errorMsg = "Offsets to texture access operations must be immediate values. ";
auto offsetBegin = finalIllegalOffsets.begin(); auto offsetEnd = finalIllegalOffsets.end(); std::sort(offsetBegin, offsetEnd, InstLT); offsetEnd = std::unique(offsetBegin, offsetEnd, InstEq); for (auto it = offsetBegin; it != offsetEnd; it++) { CallInst *CI = it->call; if (Instruction *offset = dyn_cast<Instruction>(it->offset)) { if (LI.getLoopFor(offset->getParent())) dxilutil::EmitErrorOnInstruction( CI, errorMsg + "Unrolling the loop containing the offset value" " manually and using -O3 may help in some cases.\n"); else dxilutil::EmitErrorOnInstruction(CI, errorMsg); } else { dxilutil::EmitErrorOnInstruction( CI, "Offsets to texture access operations must be between -8 and 7. "); } } } } void DxilLegalizeSampleOffsetPass::TryUnrollLoop( std::vector<Offset> &illegalOffsets, Function &F) { legacy::FunctionPassManager PM(F.getParent()); // Scalarize aggregates as mem2reg only applies to scalars. PM.add(createSROAPass()); // Always need mem2reg to simplify illegal offsets. PM.add(createPromoteMemoryToRegisterPass()); bool UnrollLoop = HasIllegalOffsetInLoop(illegalOffsets, LI, F); if (UnrollLoop) { PM.add(createCFGSimplificationPass()); PM.add(createLCSSAPass()); PM.add(createLoopSimplifyPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass(-2, -1, 0, 0)); } PM.run(F); if (UnrollLoop) { DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); DVC->ResetUnknowns(); } } void DxilLegalizeSampleOffsetPass::CollectIllegalOffsets( std::vector<Offset> &illegalOffsets, Function &CurF, hlsl::OP *hlslOP) { CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::Sample, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::SampleBias, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::SampleCmp, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::SampleCmpLevelZero, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::SampleGrad, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::SampleLevel, hlslOP); CollectIllegalOffsets(illegalOffsets, CurF, DXIL::OpCode::TextureLoad, hlslOP); } void DxilLegalizeSampleOffsetPass::CollectIllegalOffsets( std::vector<Offset> &illegalOffsets, Function &CurF, DXIL::OpCode opcode, hlsl::OP *hlslOP) { auto &intrFuncList = hlslOP->GetOpFuncList(opcode); for (auto it : intrFuncList) { Function *intrFunc = it.second; if (!intrFunc) continue; for (User *U : intrFunc->users()) { CallInst *CI = cast<CallInst>(U); // Skip instructions not in the current function. if (CI->getParent()->getParent() != &CurF) continue; CollectIllegalOffset(CI, opcode, illegalOffsets); } } } void DxilLegalizeSampleOffsetPass::LegalizeOffsets( const std::vector<Offset> &illegalOffsets) { if (!illegalOffsets.empty()) { DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); for (auto it : illegalOffsets) { if (Instruction *I = dyn_cast<Instruction>(it.offset)) if (Value *V = DVC->GetValue(I)) I->replaceAllUsesWith(V); } } } FunctionPass *llvm::createDxilLegalizeSampleOffsetPass() { return new DxilLegalizeSampleOffsetPass(); } INITIALIZE_PASS_BEGIN(DxilLegalizeSampleOffsetPass, "dxil-legalize-sample-offset", "DXIL legalize sample offset", false, false) INITIALIZE_PASS_DEPENDENCY(DxilValueCache) INITIALIZE_PASS_END(DxilLegalizeSampleOffsetPass, "dxil-legalize-sample-offset", "DXIL legalize sample offset", false, false)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/ComputeViewIdStateBuilder.cpp
/////////////////////////////////////////////////////////////////////////////// // // // ComputeViewIdStateBuilder.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/HLSL/ComputeViewIdState.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include <algorithm> using namespace llvm; using namespace llvm::legacy; using namespace hlsl; using llvm::legacy::FunctionPassManager; using llvm::legacy::PassManager; using std::unordered_map; using std::unordered_set; using std::vector; #define DXILVIEWID_DBG 0 #define DEBUG_TYPE "viewid_builder" namespace { class DxilViewIdStateBuilder { static const unsigned kNumComps = 4; static const unsigned kMaxSigScalars = 32 * 4; public: using OutputsDependentOnViewIdType = DxilViewIdStateData::OutputsDependentOnViewIdType; using InputsContributingToOutputType = DxilViewIdStateData::InputsContributingToOutputType; DxilViewIdStateBuilder(DxilViewIdStateData &state, DxilModule *pDxilModule) : m_pModule(pDxilModule), m_NumInputSigScalars(state.m_NumInputSigScalars), m_NumOutputSigScalars(state.m_NumOutputSigScalars, DxilViewIdStateData::kNumStreams), m_NumPCOrPrimSigScalars(state.m_NumPCOrPrimSigScalars), m_OutputsDependentOnViewId(state.m_OutputsDependentOnViewId, DxilViewIdStateData::kNumStreams), m_PCOrPrimOutputsDependentOnViewId( state.m_PCOrPrimOutputsDependentOnViewId), m_InputsContributingToOutputs(state.m_InputsContributingToOutputs, DxilViewIdStateData::kNumStreams), m_InputsContributingToPCOrPrimOutputs( state.m_InputsContributingToPCOrPrimOutputs), m_PCInputsContributingToOutputs(state.m_PCInputsContributingToOutputs), m_bUsesViewId(state.m_bUsesViewId) {} void Compute(); private: static const unsigned kNumStreams = 4; DxilModule *m_pModule; unsigned &m_NumInputSigScalars; MutableArrayRef<unsigned> m_NumOutputSigScalars; unsigned &m_NumPCOrPrimSigScalars; // Set of scalar outputs dependent on ViewID. MutableArrayRef<OutputsDependentOnViewIdType> m_OutputsDependentOnViewId; OutputsDependentOnViewIdType &m_PCOrPrimOutputsDependentOnViewId; // Set of scalar inputs contributing to computation of scalar outputs. MutableArrayRef<InputsContributingToOutputType> m_InputsContributingToOutputs; InputsContributingToOutputType &m_InputsContributingToPCOrPrimOutputs; // HS PC and MS Prim only. InputsContributingToOutputType &m_PCInputsContributingToOutputs; // DS only. bool &m_bUsesViewId; // Members for building the ViewIdState. // Dynamically indexed components of signature elements. using DynamicallyIndexedElemsType = std::unordered_map<unsigned, unsigned>; DynamicallyIndexedElemsType m_InpSigDynIdxElems; DynamicallyIndexedElemsType m_OutSigDynIdxElems; DynamicallyIndexedElemsType m_PCSigDynIdxElems; // Information per entry point. 
using FunctionSetType = std::unordered_set<llvm::Function *>; using InstructionSetType = std::unordered_set<llvm::Instruction *>; struct EntryInfo { llvm::Function *pEntryFunc = nullptr; // Sets of functions that may be reachable from an entry. FunctionSetType Functions; // Outputs to analyze. InstructionSetType Outputs; // Contributing instructions per output. std::unordered_map<unsigned, InstructionSetType> ContributingInstructions[kNumStreams]; void Clear(); }; EntryInfo m_Entry; EntryInfo m_PCEntry; // Information per function. using FunctionReturnSet = std::unordered_set<llvm::ReturnInst *>; struct FuncInfo { FunctionReturnSet Returns; ControlDependence CtrlDep; std::unique_ptr<llvm::DominatorTreeBase<llvm::BasicBlock>> pDomTree; void Clear(); }; std::unordered_map<llvm::Function *, std::unique_ptr<FuncInfo>> m_FuncInfo; // Cache of decls (global/alloca) reaching a pointer value. using ValueSetType = std::unordered_set<llvm::Value *>; std::unordered_map<llvm::Value *, ValueSetType> m_ReachingDeclsCache; // Cache of stores for each decl. std::unordered_map<llvm::Value *, ValueSetType> m_StoresPerDeclCache; void Clear(); void DetermineMaxPackedLocation(DxilSignature &DxilSig, unsigned *pMaxSigLoc, unsigned NumStreams); void ComputeReachableFunctionsRec(llvm::CallGraph &CG, llvm::CallGraphNode *pNode, FunctionSetType &FuncSet); void AnalyzeFunctions(EntryInfo &Entry); void CollectValuesContributingToOutputs(EntryInfo &Entry); void CollectValuesContributingToOutputRec( EntryInfo &Entry, llvm::Value *pContributingValue, InstructionSetType &ContributingInstructions); void CollectPhiCFValuesContributingToOutputRec( llvm::PHINode *pPhi, EntryInfo &Entry, InstructionSetType &ContributingInstructions); const ValueSetType &CollectReachingDecls(llvm::Value *pValue); void CollectReachingDeclsRec(llvm::Value *pValue, ValueSetType &ReachingDecls, ValueSetType &Visited); const ValueSetType &CollectStores(llvm::Value *pValue); void CollectStoresRec(llvm::Value *pValue, ValueSetType &Stores, ValueSetType &Visited); void UpdateDynamicIndexUsageState() const; void CreateViewIdSets(const std::unordered_map<unsigned, InstructionSetType> &ContributingInstructions, OutputsDependentOnViewIdType &OutputsDependentOnViewId, InputsContributingToOutputType &InputsContributingToOutputs, bool bPC); void UpdateDynamicIndexUsageStateForSig( DxilSignature &Sig, const DynamicallyIndexedElemsType &DynIdxElems) const; unsigned GetLinearIndex(DxilSignatureElement &SigElem, int row, unsigned col) const; }; } // namespace void DxilViewIdStateBuilder::Compute() { Clear(); const ShaderModel *pSM = m_pModule->GetShaderModel(); m_bUsesViewId = m_pModule->m_ShaderFlags.GetViewID(); // 1. Traverse signature MD to determine max packed location. DetermineMaxPackedLocation(m_pModule->GetInputSignature(), &m_NumInputSigScalars, 1); DetermineMaxPackedLocation(m_pModule->GetOutputSignature(), &m_NumOutputSigScalars[0], pSM->IsGS() ? kNumStreams : 1); DetermineMaxPackedLocation(m_pModule->GetPatchConstOrPrimSignature(), &m_NumPCOrPrimSigScalars, 1); // 2. Collect sets of functions reachable from main and pc entries. CallGraphAnalysis CGA; CallGraph CG = CGA.run(m_pModule->GetModule()); m_Entry.pEntryFunc = m_pModule->GetEntryFunction(); m_PCEntry.pEntryFunc = m_pModule->GetPatchConstantFunction(); ComputeReachableFunctionsRec(CG, CG[m_Entry.pEntryFunc], m_Entry.Functions); if (m_PCEntry.pEntryFunc) { DXASSERT_NOMSG(pSM->IsHS()); ComputeReachableFunctionsRec(CG, CG[m_PCEntry.pEntryFunc], m_PCEntry.Functions); } // 3. 
// Determine shape components that are dynamically accessed and collect all // sig outputs. AnalyzeFunctions(m_Entry); if (m_PCEntry.pEntryFunc) { AnalyzeFunctions(m_PCEntry); } // 4. Collect sets of values contributing to outputs. CollectValuesContributingToOutputs(m_Entry); if (m_PCEntry.pEntryFunc) { CollectValuesContributingToOutputs(m_PCEntry); } // 5. Construct dependency sets. for (unsigned StreamId = 0; StreamId < (pSM->IsGS() ? kNumStreams : 1u); StreamId++) { CreateViewIdSets(m_Entry.ContributingInstructions[StreamId], m_OutputsDependentOnViewId[StreamId], m_InputsContributingToOutputs[StreamId], false); } if (pSM->IsHS() || pSM->IsMS()) { CreateViewIdSets(m_PCEntry.ContributingInstructions[0], m_PCOrPrimOutputsDependentOnViewId, m_InputsContributingToPCOrPrimOutputs, true); } else if (pSM->IsDS()) { OutputsDependentOnViewIdType OutputsDependentOnViewId; CreateViewIdSets(m_Entry.ContributingInstructions[0], OutputsDependentOnViewId, m_PCInputsContributingToOutputs, true); DXASSERT_NOMSG(OutputsDependentOnViewId == m_OutputsDependentOnViewId[0]); } // 6. Update dynamically indexed input/output component masks. UpdateDynamicIndexUsageState(); #if DXILVIEWID_DBG PrintSets(dbgs()); #endif } void DxilViewIdStateBuilder::Clear() { m_bUsesViewId = false; m_NumInputSigScalars = 0; for (unsigned i = 0; i < kNumStreams; i++) { m_NumOutputSigScalars[i] = 0; m_OutputsDependentOnViewId[i].reset(); m_InputsContributingToOutputs[i].clear(); } m_NumPCOrPrimSigScalars = 0; m_InpSigDynIdxElems.clear(); m_OutSigDynIdxElems.clear(); m_PCSigDynIdxElems.clear(); m_PCOrPrimOutputsDependentOnViewId.reset(); m_InputsContributingToPCOrPrimOutputs.clear(); m_PCInputsContributingToOutputs.clear(); m_Entry.Clear(); m_PCEntry.Clear(); m_FuncInfo.clear(); m_ReachingDeclsCache.clear(); } void DxilViewIdStateBuilder::EntryInfo::Clear() { pEntryFunc = nullptr; Functions.clear(); Outputs.clear(); for (unsigned i = 0; i < kNumStreams; i++) ContributingInstructions[i].clear(); } void DxilViewIdStateBuilder::FuncInfo::Clear() { Returns.clear(); CtrlDep.Clear(); pDomTree.reset(); } void DxilViewIdStateBuilder::DetermineMaxPackedLocation(DxilSignature &DxilSig, unsigned *pMaxSigLoc, unsigned NumStreams) { DXASSERT_NOMSG(NumStreams == 1 || NumStreams == kNumStreams); for (unsigned i = 0; i < NumStreams; i++) { pMaxSigLoc[i] = 0; } for (auto &E : DxilSig.GetElements()) { if (E->GetStartRow() == Semantic::kUndefinedRow) continue; unsigned StreamId = E->GetOutputStream(); unsigned endLoc = GetLinearIndex(*E, E->GetRows() - 1, E->GetCols() - 1); pMaxSigLoc[StreamId] = std::max(pMaxSigLoc[StreamId], endLoc + 1); } } void DxilViewIdStateBuilder::ComputeReachableFunctionsRec( CallGraph &CG, CallGraphNode *pNode, FunctionSetType &FuncSet) { Function *F = pNode->getFunction(); // Accumulate only functions with bodies. 
if (F->empty()) return; if (FuncSet.count(F)) return; auto itIns = FuncSet.emplace(F); DXASSERT_NOMSG(itIns.second); (void)itIns; for (auto it = pNode->begin(), itEnd = pNode->end(); it != itEnd; ++it) { CallGraphNode *pSuccNode = it->second; ComputeReachableFunctionsRec(CG, pSuccNode, FuncSet); } } static bool GetUnsignedVal(Value *V, uint32_t *pValue) { ConstantInt *CI = dyn_cast<ConstantInt>(V); if (!CI) return false; uint64_t u = CI->getZExtValue(); if (u > UINT32_MAX) return false; *pValue = (uint32_t)u; return true; } void DxilViewIdStateBuilder::AnalyzeFunctions(EntryInfo &Entry) { for (auto *F : Entry.Functions) { DXASSERT_NOMSG(!F->empty()); auto itFI = m_FuncInfo.find(F); FuncInfo *pFuncInfo = nullptr; if (itFI != m_FuncInfo.end()) { pFuncInfo = itFI->second.get(); } else { m_FuncInfo[F] = make_unique<FuncInfo>(); pFuncInfo = m_FuncInfo[F].get(); } for (auto itBB = F->begin(), endBB = F->end(); itBB != endBB; ++itBB) { BasicBlock *BB = itBB; for (auto itInst = BB->begin(), endInst = BB->end(); itInst != endInst; ++itInst) { if (ReturnInst *RI = dyn_cast<ReturnInst>(itInst)) { pFuncInfo->Returns.emplace(RI); continue; } CallInst *CI = dyn_cast<CallInst>(itInst); if (!CI) continue; DynamicallyIndexedElemsType *pDynIdxElems = nullptr; int row = Semantic::kUndefinedRow; unsigned id, col; if (DxilInst_LoadInput LI = DxilInst_LoadInput(CI)) { pDynIdxElems = &m_InpSigDynIdxElems; IFTBOOL(GetUnsignedVal(LI.get_inputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(LI.get_rowIndex(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(LI.get_colIndex(), &col), DXC_E_GENERAL_INTERNAL_ERROR); } else if (DxilInst_StoreOutput SO = DxilInst_StoreOutput(CI)) { pDynIdxElems = &m_OutSigDynIdxElems; IFTBOOL(GetUnsignedVal(SO.get_outputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(SO.get_rowIndex(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(SO.get_colIndex(), &col), DXC_E_GENERAL_INTERNAL_ERROR); Entry.Outputs.emplace(CI); } else if (DxilInst_StoreVertexOutput SVO = DxilInst_StoreVertexOutput(CI)) { pDynIdxElems = &m_OutSigDynIdxElems; IFTBOOL(GetUnsignedVal(SVO.get_outputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(SVO.get_rowIndex(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(SVO.get_colIndex(), &col), DXC_E_GENERAL_INTERNAL_ERROR); Entry.Outputs.emplace(CI); } else if (DxilInst_StorePrimitiveOutput SPO = DxilInst_StorePrimitiveOutput(CI)) { pDynIdxElems = &m_PCSigDynIdxElems; IFTBOOL(GetUnsignedVal(SPO.get_outputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(SPO.get_rowIndex(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(SPO.get_colIndex(), &col), DXC_E_GENERAL_INTERNAL_ERROR); Entry.Outputs.emplace(CI); } else if (DxilInst_LoadPatchConstant LPC = DxilInst_LoadPatchConstant(CI)) { if (m_pModule->GetShaderModel()->IsDS()) { pDynIdxElems = &m_PCSigDynIdxElems; IFTBOOL(GetUnsignedVal(LPC.get_inputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(LPC.get_row(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(LPC.get_col(), &col), DXC_E_GENERAL_INTERNAL_ERROR); } else { // Do nothing. This is an internal helper function for DXBC-2-DXIL // converter. 
DXASSERT_NOMSG(m_pModule->GetShaderModel()->IsHS()); } } else if (DxilInst_StorePatchConstant SPC = DxilInst_StorePatchConstant(CI)) { pDynIdxElems = &m_PCSigDynIdxElems; IFTBOOL(GetUnsignedVal(SPC.get_outputSigID(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(SPC.get_row(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(SPC.get_col(), &col), DXC_E_GENERAL_INTERNAL_ERROR); Entry.Outputs.emplace(CI); } else if (DxilInst_LoadOutputControlPoint LOCP = DxilInst_LoadOutputControlPoint(CI)) { if (m_pModule->GetShaderModel()->IsDS()) { pDynIdxElems = &m_InpSigDynIdxElems; IFTBOOL(GetUnsignedVal(LOCP.get_inputSigId(), &id), DXC_E_GENERAL_INTERNAL_ERROR); GetUnsignedVal(LOCP.get_row(), (uint32_t *)&row); IFTBOOL(GetUnsignedVal(LOCP.get_col(), &col), DXC_E_GENERAL_INTERNAL_ERROR); } else if (m_pModule->GetShaderModel()->IsHS()) { // Do nothing, as the information has been captured by the output // signature of the CP entry. } else { DXASSERT_NOMSG(false); } } else { continue; } // Record dynamic index usage. if (pDynIdxElems && row == Semantic::kUndefinedRow) { (*pDynIdxElems)[id] |= (1 << col); } } } // Compute dominator relation. pFuncInfo->pDomTree = make_unique<DominatorTreeBase<BasicBlock>>(false); pFuncInfo->pDomTree->recalculate(*F); #if DXILVIEWID_DBG pFuncInfo->pDomTree->print(dbgs()); #endif // Compute postdominator relation. DominatorTreeBase<BasicBlock> PDR(true); PDR.recalculate(*F); #if DXILVIEWID_DBG PDR.print(dbgs()); #endif // Compute control dependence. pFuncInfo->CtrlDep.Compute(F, PDR); #if DXILVIEWID_DBG pFuncInfo->CtrlDep.print(dbgs()); #endif } } void DxilViewIdStateBuilder::CollectValuesContributingToOutputs( EntryInfo &Entry) { for (auto *CI : Entry.Outputs) { // CI = call instruction DxilSignature *pDxilSig = nullptr; Value *pContributingValue = nullptr; unsigned id = (unsigned)-1; int startRow = Semantic::kUndefinedRow, endRow = Semantic::kUndefinedRow; unsigned col = (unsigned)-1; if (DxilInst_StoreOutput SO = DxilInst_StoreOutput(CI)) { pDxilSig = &m_pModule->GetOutputSignature(); pContributingValue = SO.get_value(); GetUnsignedVal(SO.get_outputSigId(), &id); GetUnsignedVal(SO.get_colIndex(), &col); GetUnsignedVal(SO.get_rowIndex(), (uint32_t *)&startRow); } else if (DxilInst_StoreVertexOutput SVO = DxilInst_StoreVertexOutput(CI)) { pDxilSig = &m_pModule->GetOutputSignature(); pContributingValue = SVO.get_value(); GetUnsignedVal(SVO.get_outputSigId(), &id); GetUnsignedVal(SVO.get_colIndex(), &col); GetUnsignedVal(SVO.get_rowIndex(), (uint32_t *)&startRow); } else if (DxilInst_StorePrimitiveOutput SPO = DxilInst_StorePrimitiveOutput(CI)) { pDxilSig = &m_pModule->GetPatchConstOrPrimSignature(); pContributingValue = SPO.get_value(); GetUnsignedVal(SPO.get_outputSigId(), &id); GetUnsignedVal(SPO.get_colIndex(), &col); GetUnsignedVal(SPO.get_rowIndex(), (uint32_t *)&startRow); } else if (DxilInst_StorePatchConstant SPC = DxilInst_StorePatchConstant(CI)) { pDxilSig = &m_pModule->GetPatchConstOrPrimSignature(); pContributingValue = SPC.get_value(); GetUnsignedVal(SPC.get_outputSigID(), &id); GetUnsignedVal(SPC.get_row(), (uint32_t *)&startRow); GetUnsignedVal(SPC.get_col(), &col); } else { IFT(DXC_E_GENERAL_INTERNAL_ERROR); } DxilSignatureElement &SigElem = pDxilSig->GetElement(id); if (!SigElem.IsAllocated()) continue; unsigned StreamId = SigElem.GetOutputStream(); if (startRow != Semantic::kUndefinedRow) { endRow = startRow; } else { // The entire column is affected by value. 
DXASSERT_NOMSG(SigElem.GetID() == id && SigElem.GetStartRow() != Semantic::kUndefinedRow); startRow = 0; endRow = SigElem.GetRows() - 1; } InstructionSetType ContributingInstructionsAllRows; InstructionSetType *pContributingInstructions = &ContributingInstructionsAllRows; if (startRow == endRow) { // Scalar or indexable with known index. unsigned index = GetLinearIndex(SigElem, startRow, col); pContributingInstructions = &Entry.ContributingInstructions[StreamId][index]; } CollectValuesContributingToOutputRec(Entry, pContributingValue, *pContributingInstructions); // Handle control dependence of this instruction BB. BasicBlock *pBB = CI->getParent(); Function *F = pBB->getParent(); FuncInfo *pFuncInfo = m_FuncInfo[F].get(); const BasicBlockSet &CtrlDepSet = pFuncInfo->CtrlDep.GetCDBlocks(pBB); for (BasicBlock *B : CtrlDepSet) { CollectValuesContributingToOutputRec(Entry, B->getTerminator(), *pContributingInstructions); } if (pContributingInstructions == &ContributingInstructionsAllRows) { // Write dynamically indexed output contributions to all rows. for (int row = startRow; row <= endRow; row++) { unsigned index = GetLinearIndex(SigElem, row, col); Entry.ContributingInstructions[StreamId][index].insert( ContributingInstructionsAllRows.begin(), ContributingInstructionsAllRows.end()); } } } } void DxilViewIdStateBuilder::CollectValuesContributingToOutputRec( EntryInfo &Entry, Value *pContributingValue, InstructionSetType &ContributingInstructions) { if (dyn_cast<Argument>(pContributingValue)) { // This must be a leftover signature argument of an entry function. DXASSERT_NOMSG(Entry.pEntryFunc == m_pModule->GetEntryFunction() || Entry.pEntryFunc == m_pModule->GetPatchConstantFunction()); return; } Instruction *pContributingInst = dyn_cast<Instruction>(pContributingValue); if (pContributingInst == nullptr) { // Can be literal constant, global decl, branch target. DXASSERT_NOMSG(isa<Constant>(pContributingValue) || isa<BasicBlock>(pContributingValue)); return; } BasicBlock *pBB = pContributingInst->getParent(); Function *F = pBB->getParent(); auto FuncInfoIt = m_FuncInfo.find(F); DXASSERT_NOMSG(FuncInfoIt != m_FuncInfo.end()); if (FuncInfoIt == m_FuncInfo.end()) { return; } auto itInst = ContributingInstructions.emplace(pContributingInst); // Already visited instruction. if (!itInst.second) return; // Handle special cases. if (PHINode *phi = dyn_cast<PHINode>(pContributingInst)) { CollectPhiCFValuesContributingToOutputRec(phi, Entry, ContributingInstructions); } else if (isa<LoadInst>(pContributingInst) || isa<AtomicCmpXchgInst>(pContributingInst) || isa<AtomicRMWInst>(pContributingInst)) { Value *pPtrValue = pContributingInst->getOperand(0); DXASSERT_NOMSG(pPtrValue->getType()->isPointerTy()); const ValueSetType &ReachingDecls = CollectReachingDecls(pPtrValue); DXASSERT_NOMSG(ReachingDecls.size() > 0); for (Value *pDeclValue : ReachingDecls) { const ValueSetType &Stores = CollectStores(pDeclValue); for (Value *V : Stores) { CollectValuesContributingToOutputRec(Entry, V, ContributingInstructions); } } } else if (CallInst *CI = dyn_cast<CallInst>(pContributingInst)) { if (!hlsl::OP::IsDxilOpFuncCallInst(CI)) { Function *F = CI->getCalledFunction(); if (!F->empty()) { // Return value of a user function. if (Entry.Functions.find(F) != Entry.Functions.end()) { const FuncInfo &FI = *m_FuncInfo[F]; for (ReturnInst *pRetInst : FI.Returns) { CollectValuesContributingToOutputRec(Entry, pRetInst, ContributingInstructions); } } } } } // Handle instruction inputs. 
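// (The operand walk below is recursive, so the transitive closure of every
// value feeding this instruction, plus the terminators of blocks it is
// control-dependent on, ends up in ContributingInstructions.)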
unsigned NumOps = pContributingInst->getNumOperands(); for (unsigned i = 0; i < NumOps; i++) { Value *O = pContributingInst->getOperand(i); CollectValuesContributingToOutputRec(Entry, O, ContributingInstructions); } // Handle control dependence of this instruction BB. FuncInfo *pFuncInfo = FuncInfoIt->second.get(); const BasicBlockSet &CtrlDepSet = pFuncInfo->CtrlDep.GetCDBlocks(pBB); for (BasicBlock *B : CtrlDepSet) { CollectValuesContributingToOutputRec(Entry, B->getTerminator(), ContributingInstructions); } } // Only process control-dependent basic blocks for constant operands of the // phi-function. An obvious "definition" point for a constant operand is the // predecessor along corresponding edge. However, this may be too conservative // and, as such, pick up extra control dependent BBs. A better "definition" // point is the highest dominator where it is still legal to "insert" constant // assignment. In this context, "legal" means that only one value "leaves" the // dominator and reaches Phi. void DxilViewIdStateBuilder::CollectPhiCFValuesContributingToOutputRec( PHINode *pPhi, EntryInfo &Entry, InstructionSetType &ContributingInstructions) { Function *F = pPhi->getParent()->getParent(); FuncInfo *pFuncInfo = m_FuncInfo[F].get(); unordered_map<DomTreeNodeBase<BasicBlock> *, Value *> DomTreeMarkers; // Mark predecessors of each value, so that there is a legal "definition" // point. for (unsigned i = 0; i < pPhi->getNumOperands(); i++) { Value *pValue = pPhi->getIncomingValue(i); BasicBlock *pBB = pPhi->getIncomingBlock(i); DomTreeNodeBase<BasicBlock> *pDomNode = pFuncInfo->pDomTree->getNode(pBB); auto it = DomTreeMarkers.emplace(pDomNode, pValue); DXASSERT_NOMSG(it.second || it.first->second == pValue); (void)it; } // Mark the dominator tree with "definition" values, walking up to the parent. for (unsigned i = 0; i < pPhi->getNumOperands(); i++) { Value *pValue = pPhi->getIncomingValue(i); BasicBlock *pDefBB = &F->getEntryBlock(); if (Instruction *pDefInst = dyn_cast<Instruction>(pValue)) { pDefBB = pDefInst->getParent(); } BasicBlock *pBB = pPhi->getIncomingBlock(i); if (pBB == pDefBB) { continue; // we already handled the predecessor. } DomTreeNodeBase<BasicBlock> *pDomNode = pFuncInfo->pDomTree->getNode(pBB); pDomNode = pDomNode->getIDom(); while (pDomNode) { auto it = DomTreeMarkers.emplace(pDomNode, pValue); if (!it.second) { if (it.first->second != pValue && it.first->second != nullptr) { if (!isa<Constant>(it.first->second) || !isa<Constant>(pValue)) { // Unless both are different constants, mark the "definition" point // as illegal. it.first->second = nullptr; // If both are constants, leave the marker of the first one. } } break; } // Do not go higher than a legal definition point. pBB = pDomNode->getBlock(); if (pBB == pDefBB) break; pDomNode = pDomNode->getIDom(); } } // Handle control dependence for Constant arguments of Phi. for (unsigned i = 0; i < pPhi->getNumOperands(); i++) { Value *pValue = pPhi->getIncomingValue(i); if (!isa<Constant>(pValue)) continue; // Determine the higher legal "definition" point. 
BasicBlock *pBB = pPhi->getIncomingBlock(i); DomTreeNodeBase<BasicBlock> *pDomNode = pFuncInfo->pDomTree->getNode(pBB); DomTreeNodeBase<BasicBlock> *pDefDomNode = pDomNode; while (pDomNode) { auto it = DomTreeMarkers.find(pDomNode); DXASSERT_NOMSG(it != DomTreeMarkers.end()); if (it->second != pValue) { DXASSERT_NOMSG(it->second == nullptr || isa<Constant>(it->second)); break; } pDefDomNode = pDomNode; pDomNode = pDomNode->getIDom(); } // Handle control dependence of this constant argument highest legal // "definition" point. pBB = pDefDomNode->getBlock(); const BasicBlockSet &CtrlDepSet = pFuncInfo->CtrlDep.GetCDBlocks(pBB); for (BasicBlock *B : CtrlDepSet) { CollectValuesContributingToOutputRec(Entry, B->getTerminator(), ContributingInstructions); } } } const DxilViewIdStateBuilder::ValueSetType & DxilViewIdStateBuilder::CollectReachingDecls(Value *pValue) { auto it = m_ReachingDeclsCache.emplace(pValue, ValueSetType()); if (it.second) { // We have not seen this value before. ValueSetType Visited; CollectReachingDeclsRec(pValue, it.first->second, Visited); } return it.first->second; } void DxilViewIdStateBuilder::CollectReachingDeclsRec( Value *pValue, ValueSetType &ReachingDecls, ValueSetType &Visited) { if (Visited.find(pValue) != Visited.end()) return; bool bInitialValue = Visited.size() == 0; Visited.emplace(pValue); if (!bInitialValue) { auto it = m_ReachingDeclsCache.find(pValue); if (it != m_ReachingDeclsCache.end()) { ReachingDecls.insert(it->second.begin(), it->second.end()); return; } } if (dyn_cast<GlobalVariable>(pValue)) { ReachingDecls.emplace(pValue); return; } if (GetElementPtrInst *pGepInst = dyn_cast<GetElementPtrInst>(pValue)) { Value *pPtrValue = pGepInst->getPointerOperand(); CollectReachingDeclsRec(pPtrValue, ReachingDecls, Visited); } else if (GEPOperator *pGepOp = dyn_cast<GEPOperator>(pValue)) { Value *pPtrValue = pGepOp->getPointerOperand(); CollectReachingDeclsRec(pPtrValue, ReachingDecls, Visited); } else if (isa<ConstantExpr>(pValue) && cast<ConstantExpr>(pValue)->getOpcode() == Instruction::AddrSpaceCast) { CollectReachingDeclsRec(cast<ConstantExpr>(pValue)->getOperand(0), ReachingDecls, Visited); } else if (AddrSpaceCastInst *pCI = dyn_cast<AddrSpaceCastInst>(pValue)) { CollectReachingDeclsRec(pCI->getOperand(0), ReachingDecls, Visited); } else if (BitCastInst *pCI = dyn_cast<BitCastInst>(pValue)) { CollectReachingDeclsRec(pCI->getOperand(0), ReachingDecls, Visited); } else if (dyn_cast<AllocaInst>(pValue)) { ReachingDecls.emplace(pValue); } else if (PHINode *phi = dyn_cast<PHINode>(pValue)) { for (Value *pPtrValue : phi->operands()) { CollectReachingDeclsRec(pPtrValue, ReachingDecls, Visited); } } else if (SelectInst *SelI = dyn_cast<SelectInst>(pValue)) { CollectReachingDeclsRec(SelI->getTrueValue(), ReachingDecls, Visited); CollectReachingDeclsRec(SelI->getFalseValue(), ReachingDecls, Visited); } else if (dyn_cast<Argument>(pValue)) { ReachingDecls.emplace(pValue); } else if (CallInst *call = dyn_cast<CallInst>(pValue)) { DXASSERT(OP::GetDxilOpFuncCallInst(call) == DXIL::OpCode::GetMeshPayload, "the function must be @dx.op.getMeshPayload here."); ReachingDecls.emplace(pValue); } else { IFT(DXC_E_GENERAL_INTERNAL_ERROR); } } const DxilViewIdStateBuilder::ValueSetType & DxilViewIdStateBuilder::CollectStores(llvm::Value *pValue) { auto it = m_StoresPerDeclCache.emplace(pValue, ValueSetType()); if (it.second) { // We have not seen this value before. 
ValueSetType Visited; CollectStoresRec(pValue, it.first->second, Visited); } return it.first->second; } void DxilViewIdStateBuilder::CollectStoresRec(llvm::Value *pValue, ValueSetType &Stores, ValueSetType &Visited) { if (Visited.find(pValue) != Visited.end()) return; bool bInitialValue = Visited.size() == 0; Visited.emplace(pValue); if (!bInitialValue) { auto it = m_StoresPerDeclCache.find(pValue); if (it != m_StoresPerDeclCache.end()) { Stores.insert(it->second.begin(), it->second.end()); return; } } if (isa<LoadInst>(pValue)) { return; } else if (isa<StoreInst>(pValue) || isa<AtomicCmpXchgInst>(pValue) || isa<AtomicRMWInst>(pValue)) { Stores.emplace(pValue); return; } for (auto *U : pValue->users()) { CollectStoresRec(U, Stores, Visited); } } void DxilViewIdStateBuilder::CreateViewIdSets( const std::unordered_map<unsigned, InstructionSetType> &ContributingInstructions, OutputsDependentOnViewIdType &OutputsDependentOnViewId, InputsContributingToOutputType &InputsContributingToOutputs, bool bPC) { const ShaderModel *pSM = m_pModule->GetShaderModel(); for (auto &itOut : ContributingInstructions) { unsigned outIdx = itOut.first; for (Instruction *pInst : itOut.second) { // Set output dependence on ViewId. if (DxilInst_ViewID VID = DxilInst_ViewID(pInst)) { DXASSERT(m_bUsesViewId, "otherwise, DxilModule flag not set properly"); OutputsDependentOnViewId[outIdx] = true; continue; } // Start setting output dependence on inputs. DxilSignatureElement *pSigElem = nullptr; bool bLoadOutputCPInHS = false; unsigned inpId = (unsigned)-1; int startRow = Semantic::kUndefinedRow, endRow = Semantic::kUndefinedRow; unsigned col = (unsigned)-1; if (DxilInst_LoadInput LI = DxilInst_LoadInput(pInst)) { GetUnsignedVal(LI.get_inputSigId(), &inpId); GetUnsignedVal(LI.get_colIndex(), &col); GetUnsignedVal(LI.get_rowIndex(), (uint32_t *)&startRow); pSigElem = &m_pModule->GetInputSignature().GetElement(inpId); if (pSM->IsDS() && bPC) { pSigElem = nullptr; } } else if (DxilInst_LoadOutputControlPoint LOCP = DxilInst_LoadOutputControlPoint(pInst)) { GetUnsignedVal(LOCP.get_inputSigId(), &inpId); GetUnsignedVal(LOCP.get_col(), &col); GetUnsignedVal(LOCP.get_row(), (uint32_t *)&startRow); if (pSM->IsHS()) { pSigElem = &m_pModule->GetOutputSignature().GetElement(inpId); bLoadOutputCPInHS = true; } else if (pSM->IsDS()) { if (!bPC) { pSigElem = &m_pModule->GetInputSignature().GetElement(inpId); } } else { DXASSERT_NOMSG(false); } } else if (DxilInst_LoadPatchConstant LPC = DxilInst_LoadPatchConstant(pInst)) { if (pSM->IsDS() && bPC) { GetUnsignedVal(LPC.get_inputSigId(), &inpId); GetUnsignedVal(LPC.get_col(), &col); GetUnsignedVal(LPC.get_row(), (uint32_t *)&startRow); pSigElem = &m_pModule->GetPatchConstOrPrimSignature().GetElement(inpId); } } else { continue; } // Finalize setting output dependence on inputs. if (pSigElem && pSigElem->IsAllocated()) { if (startRow != Semantic::kUndefinedRow) { endRow = startRow; } else { // The entire column contributes to output. startRow = 0; endRow = pSigElem->GetRows() - 1; } auto &ContributingInputs = InputsContributingToOutputs[outIdx]; for (int row = startRow; row <= endRow; row++) { unsigned index = GetLinearIndex(*pSigElem, row, col); if (!bLoadOutputCPInHS) { ContributingInputs.emplace(index); } else { // This HS patch-constant output depends on an input value of // LoadOutputControlPoint that is the output value of the HS main // (control-point) function. Transitively update this // (patch-constant) output dependence on main (control-point) // output. 
DXASSERT_NOMSG(&OutputsDependentOnViewId == &m_PCOrPrimOutputsDependentOnViewId); OutputsDependentOnViewId[outIdx] = OutputsDependentOnViewId[outIdx] || m_OutputsDependentOnViewId[0][index]; const auto it = m_InputsContributingToOutputs[0].find(index); if (it != m_InputsContributingToOutputs[0].end()) { const std::set<unsigned> &LoadOutputCPInputsContributingToOutputs = it->second; ContributingInputs.insert( LoadOutputCPInputsContributingToOutputs.begin(), LoadOutputCPInputsContributingToOutputs.end()); } } } } } } unsigned DxilViewIdStateBuilder::GetLinearIndex(DxilSignatureElement &SigElem, int row, unsigned col) const { DXASSERT_NOMSG(row >= 0 && col < kNumComps && SigElem.GetStartRow() != Semantic::kUndefinedRow); unsigned idx = (((unsigned)row) + SigElem.GetStartRow()) * kNumComps + col + SigElem.GetStartCol(); DXASSERT_NOMSG(idx < kMaxSigScalars); (void)kMaxSigScalars; return idx; } void DxilViewIdStateBuilder::UpdateDynamicIndexUsageState() const { UpdateDynamicIndexUsageStateForSig(m_pModule->GetInputSignature(), m_InpSigDynIdxElems); UpdateDynamicIndexUsageStateForSig(m_pModule->GetOutputSignature(), m_OutSigDynIdxElems); UpdateDynamicIndexUsageStateForSig(m_pModule->GetPatchConstOrPrimSignature(), m_PCSigDynIdxElems); } void DxilViewIdStateBuilder::UpdateDynamicIndexUsageStateForSig( DxilSignature &Sig, const DynamicallyIndexedElemsType &DynIdxElems) const { for (auto it : DynIdxElems) { unsigned id = it.first; unsigned mask = it.second; DxilSignatureElement &E = Sig.GetElement(id); E.SetDynIdxCompMask(mask); } } namespace { class ComputeViewIdState : public ModulePass { public: static char ID; // Pass ID, replacement for typeid ComputeViewIdState(); bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override; void print(raw_ostream &o, const Module *M) const override { DxilModule &DxilModule = M->GetDxilModule(); const ShaderModel *pSM = DxilModule.GetShaderModel(); if (pSM->IsCS() || pSM->IsLib()) return; auto &SerializedViewIdState = DxilModule.GetSerializedViewIdState(); DxilViewIdState ViewIdState(&DxilModule); ViewIdState.Deserialize(SerializedViewIdState.data(), SerializedViewIdState.size()); ViewIdState.PrintSets(errs()); } }; } // namespace char ComputeViewIdState::ID = 0; INITIALIZE_PASS_BEGIN(ComputeViewIdState, "viewid-state", "Compute information related to ViewID", true, true) INITIALIZE_PASS_END(ComputeViewIdState, "viewid-state", "Compute information related to ViewID", true, true) ComputeViewIdState::ComputeViewIdState() : ModulePass(ID) {} bool ComputeViewIdState::runOnModule(Module &M) { DxilModule &DxilModule = M.GetOrCreateDxilModule(); const ShaderModel *pSM = DxilModule.GetShaderModel(); if (!pSM->IsCS() && !pSM->IsLib()) { DxilViewIdState ViewIdState(&DxilModule); DxilViewIdStateBuilder Builder(ViewIdState, &DxilModule); Builder.Compute(); // Serialize ViewId state. ViewIdState.Serialize(); auto &TmpSerialized = ViewIdState.GetSerialized(); // Copy serialized ViewId state. auto &SerializedViewIdState = DxilModule.GetSerializedViewIdState(); SerializedViewIdState.clear(); SerializedViewIdState.resize(TmpSerialized.size()); SerializedViewIdState.assign(TmpSerialized.begin(), TmpSerialized.end()); return true; } return false; } void ComputeViewIdState::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); } namespace llvm { ModulePass *createComputeViewIdStatePass() { return new ComputeViewIdState(); } } // end of namespace llvm
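// Editor's note: an illustrative worked example, not part of the original
// source. For a vertex shader such as
//   float4 main(float4 pos : POSITION, uint vid : SV_ViewID) : SV_Position {
//     float4 r = pos;
//     if (vid == 1)
//       r.x += 1.0f;
//     return r;
//   }
// the value stored to output scalar 0 is computed in a block that is
// control-dependent on the branch reading SV_ViewID, so
// CollectValuesContributingToOutputRec reaches the ViewID call through the
// control-dependence set of that block and marks scalar 0 in
// m_OutputsDependentOnViewId; scalars 1..3 flow straight from the POSITION
// input and are recorded only in m_InputsContributingToOutputs.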
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/ComputeViewIdState.cpp
/////////////////////////////////////////////////////////////////////////////// // // // ComputeViewIdState.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/ComputeViewIdState.h" #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include <algorithm> using namespace llvm; using namespace llvm::legacy; using namespace hlsl; using llvm::legacy::FunctionPassManager; using llvm::legacy::PassManager; using std::unordered_map; using std::unordered_set; using std::vector; #define DEBUG_TYPE "viewid" DxilViewIdState::DxilViewIdState(DxilModule *pDxilModule) : m_pModule(pDxilModule) {} unsigned DxilViewIdState::getNumInputSigScalars() const { return m_NumInputSigScalars; } unsigned DxilViewIdState::getNumOutputSigScalars(unsigned StreamId) const { return m_NumOutputSigScalars[StreamId]; } unsigned DxilViewIdState::getNumPCSigScalars() const { return m_NumPCOrPrimSigScalars; } const DxilViewIdState::OutputsDependentOnViewIdType & DxilViewIdState::getOutputsDependentOnViewId(unsigned StreamId) const { return m_OutputsDependentOnViewId[StreamId]; } const DxilViewIdState::OutputsDependentOnViewIdType & DxilViewIdState::getPCOutputsDependentOnViewId() const { return m_PCOrPrimOutputsDependentOnViewId; } const DxilViewIdState::InputsContributingToOutputType & DxilViewIdState::getInputsContributingToOutputs(unsigned StreamId) const { return m_InputsContributingToOutputs[StreamId]; } const DxilViewIdState::InputsContributingToOutputType & DxilViewIdState::getInputsContributingToPCOutputs() const { return m_InputsContributingToPCOrPrimOutputs; } const DxilViewIdState::InputsContributingToOutputType & DxilViewIdState::getPCInputsContributingToOutputs() const { return m_PCInputsContributingToOutputs; } namespace { void PrintOutputsDependentOnViewId( llvm::raw_ostream &OS, llvm::StringRef SetName, unsigned NumOutputs, const DxilViewIdState::OutputsDependentOnViewIdType &OutputsDependentOnViewId) { OS << SetName << " dependent on ViewId: { "; bool bFirst = true; for (unsigned i = 0; i < NumOutputs; i++) { if (OutputsDependentOnViewId[i]) { if (!bFirst) OS << ", "; OS << i; bFirst = false; } } OS << " }\n"; } void PrintInputsContributingToOutputs( llvm::raw_ostream &OS, llvm::StringRef InputSetName, llvm::StringRef OutputSetName, const DxilViewIdState::InputsContributingToOutputType &InputsContributingToOutputs) { OS << InputSetName << " contributing to computation of " << OutputSetName << ":\n"; for (auto &it : InputsContributingToOutputs) { unsigned outIdx = it.first; auto &Inputs = it.second; OS << "output " << outIdx << " depends on inputs: { "; bool bFirst = true; for (unsigned i : Inputs) { if (!bFirst) OS << ", "; OS << i; bFirst = false; } OS << " }\n"; } } } // namespace void DxilViewIdState::PrintSets(llvm::raw_ostream &OS) { const ShaderModel *pSM = m_pModule->GetShaderModel(); OS << "ViewId state: \n"; if (pSM->IsGS()) { OS << "Number of inputs: " << m_NumInputSigScalars << ", outputs: { " << 
m_NumOutputSigScalars[0] << ", " << m_NumOutputSigScalars[1] << ", " << m_NumOutputSigScalars[2] << ", " << m_NumOutputSigScalars[3] << " }" << ", patchconst: " << m_NumPCOrPrimSigScalars << "\n"; } else if (pSM->IsMS()) { OS << "Number of inputs: " << m_NumInputSigScalars << ", vertex outputs: " << m_NumOutputSigScalars[0] << ", primitive outputs: " << m_NumPCOrPrimSigScalars << "\n"; } else { OS << "Number of inputs: " << m_NumInputSigScalars << ", outputs: " << m_NumOutputSigScalars[0] << ", patchconst: " << m_NumPCOrPrimSigScalars << "\n"; } if (pSM->IsGS()) { PrintOutputsDependentOnViewId(OS, "Outputs for Stream0", m_NumOutputSigScalars[0], m_OutputsDependentOnViewId[0]); PrintOutputsDependentOnViewId(OS, "Outputs for Stream1", m_NumOutputSigScalars[1], m_OutputsDependentOnViewId[1]); PrintOutputsDependentOnViewId(OS, "Outputs for Stream2", m_NumOutputSigScalars[2], m_OutputsDependentOnViewId[2]); PrintOutputsDependentOnViewId(OS, "Outputs for Stream3", m_NumOutputSigScalars[3], m_OutputsDependentOnViewId[3]); } else if (pSM->IsMS()) { PrintOutputsDependentOnViewId(OS, "Vertex Outputs", m_NumOutputSigScalars[0], m_OutputsDependentOnViewId[0]); } else { PrintOutputsDependentOnViewId(OS, "Outputs", m_NumOutputSigScalars[0], m_OutputsDependentOnViewId[0]); } if (pSM->IsHS()) { PrintOutputsDependentOnViewId(OS, "PCOutputs", m_NumPCOrPrimSigScalars, m_PCOrPrimOutputsDependentOnViewId); } else if (pSM->IsMS()) { PrintOutputsDependentOnViewId(OS, "Primitive Outputs", m_NumPCOrPrimSigScalars, m_PCOrPrimOutputsDependentOnViewId); } if (pSM->IsGS()) { PrintInputsContributingToOutputs(OS, "Inputs", "Outputs for Stream0", m_InputsContributingToOutputs[0]); PrintInputsContributingToOutputs(OS, "Inputs", "Outputs for Stream1", m_InputsContributingToOutputs[1]); PrintInputsContributingToOutputs(OS, "Inputs", "Outputs for Stream2", m_InputsContributingToOutputs[2]); PrintInputsContributingToOutputs(OS, "Inputs", "Outputs for Stream3", m_InputsContributingToOutputs[3]); } else if (pSM->IsMS()) { PrintInputsContributingToOutputs(OS, "Inputs", "Vertex Outputs", m_InputsContributingToOutputs[0]); } else { PrintInputsContributingToOutputs(OS, "Inputs", "Outputs", m_InputsContributingToOutputs[0]); } if (pSM->IsHS()) { PrintInputsContributingToOutputs(OS, "Inputs", "PCOutputs", m_InputsContributingToPCOrPrimOutputs); } else if (pSM->IsMS()) { PrintInputsContributingToOutputs(OS, "Inputs", "Primitive Outputs", m_InputsContributingToPCOrPrimOutputs); } else if (pSM->IsDS()) { PrintInputsContributingToOutputs(OS, "PCInputs", "Outputs", m_PCInputsContributingToOutputs); } OS << "\n"; } void DxilViewIdState::Clear() { m_NumInputSigScalars = 0; for (unsigned i = 0; i < kNumStreams; i++) { m_NumOutputSigScalars[i] = 0; m_OutputsDependentOnViewId[i].reset(); m_InputsContributingToOutputs[i].clear(); } m_NumPCOrPrimSigScalars = 0; m_PCOrPrimOutputsDependentOnViewId.reset(); m_InputsContributingToPCOrPrimOutputs.clear(); m_PCInputsContributingToOutputs.clear(); m_SerializedState.clear(); } namespace { unsigned RoundUpToUINT(unsigned x) { return (x + 31) / 32; } void SerializeOutputsDependentOnViewId( unsigned NumOutputs, const DxilViewIdState::OutputsDependentOnViewIdType &OutputsDependentOnViewId, unsigned *&pData) { unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); // Serialize output dependence on ViewId. 
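// (Editorial note) The loop below packs one bit per output scalar: output
// scalar j lands in bit (j % 32) of 32-bit word (j / 32), so
// RoundUpToUINT(NumOutputs) words hold the whole mask.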
for (unsigned i = 0; i < NumOutUINTs; i++) { unsigned x = 0; for (unsigned j = 0; j < std::min(32u, NumOutputs - 32u * i); j++) { if (OutputsDependentOnViewId[i * 32 + j]) { x |= (1u << j); } } *pData++ = x; } } void SerializeInputsContributingToOutput( unsigned NumInputs, unsigned NumOutputs, const DxilViewIdState::InputsContributingToOutputType &InputsContributingToOutputs, unsigned *&pData) { unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); // Serialize output dependence on inputs. for (unsigned outputIdx = 0; outputIdx < NumOutputs; outputIdx++) { auto it = InputsContributingToOutputs.find(outputIdx); if (it != InputsContributingToOutputs.end()) { for (unsigned inputIdx : it->second) { unsigned w = outputIdx / 32; unsigned b = outputIdx % 32; pData[inputIdx * NumOutUINTs + w] |= (1u << b); } } } pData += NumInputs * NumOutUINTs; } } // namespace void DxilViewIdState::Serialize() { const ShaderModel *pSM = m_pModule->GetShaderModel(); m_SerializedState.clear(); // Compute serialized state size in UINTs. unsigned NumInputs = getNumInputSigScalars(); unsigned NumStreams = pSM->IsGS() ? kNumStreams : 1; unsigned Size = 0; Size += 1; // #Inputs. for (unsigned StreamId = 0; StreamId < NumStreams; StreamId++) { Size += 1; // #Outputs for stream StreamId. unsigned NumOutputs = getNumOutputSigScalars(StreamId); unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); if (m_bUsesViewId) { Size += NumOutUINTs; // m_OutputsDependentOnViewId[StreamId] } Size += NumInputs * NumOutUINTs; // m_InputsContributingToOutputs[StreamId] } if (pSM->IsHS() || pSM->IsDS() || pSM->IsMS()) { Size += 1; // #PatchConstant. unsigned NumPCs = getNumPCSigScalars(); unsigned NumPCUINTs = RoundUpToUINT(NumPCs); if (pSM->IsHS() || pSM->IsMS()) { if (m_bUsesViewId) { Size += NumPCUINTs; // m_PCOrPrimOutputsDependentOnViewId } Size += NumInputs * NumPCUINTs; // m_InputsContributingToPCOrPrimOutputs } else { unsigned NumOutputs = getNumOutputSigScalars(0); unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); Size += NumPCs * NumOutUINTs; // m_PCInputsContributingToOutputs } } m_SerializedState.resize(Size); std::fill(m_SerializedState.begin(), m_SerializedState.end(), 0u); // Serialize ViewId state. 
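// (Editorial note) Layout written below, in UINTs, mirroring the Size
// computation above:
//   [0]          NumInputs
//   per stream:  NumOutputs; if m_bUsesViewId, RoundUpToUINT(NumOutputs)
//                ViewId mask words; then NumInputs * RoundUpToUINT(NumOutputs)
//                words of input-to-output dependence bits
//   HS/MS:       NumPCs, optional PC ViewId mask, then the input-to-PC table
//   DS:          NumPCs, then NumPCs * RoundUpToUINT(NumOutputs) words of
//                PC-input-to-output dependence bits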
unsigned *pData = &m_SerializedState[0]; *pData++ = NumInputs; for (unsigned StreamId = 0; StreamId < NumStreams; StreamId++) { unsigned NumOutputs = getNumOutputSigScalars(StreamId); *pData++ = NumOutputs; if (m_bUsesViewId) { SerializeOutputsDependentOnViewId( NumOutputs, m_OutputsDependentOnViewId[StreamId], pData); } SerializeInputsContributingToOutput( NumInputs, NumOutputs, m_InputsContributingToOutputs[StreamId], pData); } if (pSM->IsHS() || pSM->IsDS() || pSM->IsMS()) { unsigned NumPCs = getNumPCSigScalars(); *pData++ = NumPCs; if (pSM->IsHS() || pSM->IsMS()) { if (m_bUsesViewId) { SerializeOutputsDependentOnViewId( NumPCs, m_PCOrPrimOutputsDependentOnViewId, pData); } SerializeInputsContributingToOutput( NumInputs, NumPCs, m_InputsContributingToPCOrPrimOutputs, pData); } else { unsigned NumOutputs = getNumOutputSigScalars(0); SerializeInputsContributingToOutput( NumPCs, NumOutputs, m_PCInputsContributingToOutputs, pData); } } DXASSERT_NOMSG(pData == (&m_SerializedState[0] + Size)); } const vector<unsigned> &DxilViewIdState::GetSerialized() { if (m_SerializedState.empty()) Serialize(); return m_SerializedState; } const vector<unsigned> &DxilViewIdState::GetSerialized() const { return m_SerializedState; } namespace { unsigned DeserializeOutputsDependentOnViewId( unsigned NumOutputs, DxilViewIdState::OutputsDependentOnViewIdType &OutputsDependentOnViewId, const unsigned *pData, unsigned DataSize) { unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); IFTBOOL(NumOutUINTs <= DataSize, DXC_E_GENERAL_INTERNAL_ERROR); // Deserialize output dependence on ViewId. for (unsigned i = 0; i < NumOutUINTs; i++) { unsigned x = *pData++; for (unsigned j = 0; j < std::min(32u, NumOutputs - 32u * i); j++) { if (x & (1u << j)) { OutputsDependentOnViewId[i * 32 + j] = true; } } } return NumOutUINTs; } unsigned DeserializeInputsContributingToOutput( unsigned NumInputs, unsigned NumOutputs, DxilViewIdState::InputsContributingToOutputType &InputsContributingToOutputs, const unsigned *pData, unsigned DataSize) { unsigned NumOutUINTs = RoundUpToUINT(NumOutputs); unsigned Size = NumInputs * NumOutUINTs; IFTBOOL(Size <= DataSize, DXC_E_GENERAL_INTERNAL_ERROR); // Deserialize output dependence on inputs. for (unsigned inputIdx = 0; inputIdx < NumInputs; inputIdx++) { for (unsigned outputIdx = 0; outputIdx < NumOutputs; outputIdx++) { unsigned w = outputIdx / 32; unsigned b = outputIdx % 32; if (pData[inputIdx * NumOutUINTs + w] & (1u << b)) { InputsContributingToOutputs[outputIdx].insert(inputIdx); } } } return Size; } } // namespace void DxilViewIdState::Deserialize(const unsigned *pData, unsigned DataSizeInUINTs) { Clear(); m_SerializedState.resize(DataSizeInUINTs); memcpy(m_SerializedState.data(), pData, DataSizeInUINTs * sizeof(unsigned)); const ShaderModel *pSM = m_pModule->GetShaderModel(); m_bUsesViewId = m_pModule->m_ShaderFlags.GetViewID(); unsigned ConsumedUINTs = 0; IFTBOOL(DataSizeInUINTs - ConsumedUINTs >= 1, DXC_E_GENERAL_INTERNAL_ERROR); unsigned NumInputs = pData[ConsumedUINTs++]; m_NumInputSigScalars = NumInputs; unsigned NumStreams = pSM->IsGS() ? 
kNumStreams : 1; for (unsigned StreamId = 0; StreamId < NumStreams; StreamId++) { IFTBOOL(DataSizeInUINTs - ConsumedUINTs >= 1, DXC_E_GENERAL_INTERNAL_ERROR); unsigned NumOutputs = pData[ConsumedUINTs++]; m_NumOutputSigScalars[StreamId] = NumOutputs; if (m_bUsesViewId) { ConsumedUINTs += DeserializeOutputsDependentOnViewId( NumOutputs, m_OutputsDependentOnViewId[StreamId], &pData[ConsumedUINTs], DataSizeInUINTs - ConsumedUINTs); } ConsumedUINTs += DeserializeInputsContributingToOutput( NumInputs, NumOutputs, m_InputsContributingToOutputs[StreamId], &pData[ConsumedUINTs], DataSizeInUINTs - ConsumedUINTs); } if (pSM->IsHS() || pSM->IsDS() || pSM->IsMS()) { IFTBOOL(DataSizeInUINTs - ConsumedUINTs >= 1, DXC_E_GENERAL_INTERNAL_ERROR); unsigned NumPCs = pData[ConsumedUINTs++]; m_NumPCOrPrimSigScalars = NumPCs; if (pSM->IsHS() || pSM->IsMS()) { if (m_bUsesViewId) { ConsumedUINTs += DeserializeOutputsDependentOnViewId( NumPCs, m_PCOrPrimOutputsDependentOnViewId, &pData[ConsumedUINTs], DataSizeInUINTs - ConsumedUINTs); } ConsumedUINTs += DeserializeInputsContributingToOutput( NumInputs, NumPCs, m_InputsContributingToPCOrPrimOutputs, &pData[ConsumedUINTs], DataSizeInUINTs - ConsumedUINTs); } else { unsigned NumOutputs = getNumOutputSigScalars(0); ConsumedUINTs += DeserializeInputsContributingToOutput( NumPCs, NumOutputs, m_PCInputsContributingToOutputs, &pData[ConsumedUINTs], DataSizeInUINTs - ConsumedUINTs); } } DXASSERT_NOMSG(ConsumedUINTs == DataSizeInUINTs); }
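// Editor's usage sketch (hypothetical driver code; the API names mirror
// runOnModule and print() above, only this standalone snippet is invented):
//   DxilViewIdState State(&DM);
//   DxilViewIdStateBuilder(State, &DM).Compute(); // builder defined elsewhere
//   State.Serialize();
//   const std::vector<unsigned> &Blob = State.GetSerialized();
//   DxilViewIdState Copy(&DM);
//   Copy.Deserialize(Blob.data(), (unsigned)Blob.size());
//   Copy.PrintSets(llvm::errs()); // prints the same sets as State would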
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilCondenseResources.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilCondenseResources.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides a pass to make resource IDs zero-based and dense. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilMetadataHelper.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilResourceBinding.h" #include "dxc/DXIL/DxilSignatureElement.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/DxcBindingTable/DxcBindingTable.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/DxilSpanAllocator.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Local.h" #include <memory> #include <unordered_set> using namespace llvm; using namespace hlsl; // Resource rangeID remap. namespace { struct ResourceID { DXIL::ResourceClass Class; // Resource class. unsigned ID; // Resource ID, as specified on entry. bool operator<(const ResourceID &other) const { if (Class < other.Class) return true; if (Class > other.Class) return false; if (ID < other.ID) return true; return false; } }; struct RemapEntry { ResourceID ResID; // Resource identity, as specified on entry. DxilResourceBase *Resource; // In-memory resource representation. unsigned Index; // Index in resource vector - new ID for the resource. }; typedef std::map<ResourceID, RemapEntry> RemapEntryCollection; template <typename TResource> void BuildRewrites(const std::vector<std::unique_ptr<TResource>> &Rs, RemapEntryCollection &C) { const unsigned s = (unsigned)Rs.size(); for (unsigned i = 0; i < s; ++i) { const std::unique_ptr<TResource> &R = Rs[i]; if (R->GetID() != i) { ResourceID RId = {R->GetClass(), R->GetID()}; RemapEntry RE = {RId, R.get(), i}; C[RId] = RE; } } } // Build m_rewrites, returns 'true' if any rewrites are needed. 
bool BuildRewriteMap(RemapEntryCollection &rewrites, DxilModule &DM) { BuildRewrites(DM.GetCBuffers(), rewrites); BuildRewrites(DM.GetSRVs(), rewrites); BuildRewrites(DM.GetUAVs(), rewrites); BuildRewrites(DM.GetSamplers(), rewrites); return !rewrites.empty(); } } // namespace class DxilResourceRegisterAllocator { private: SpacesAllocator<unsigned, hlsl::DxilCBuffer> m_reservedCBufferRegisters; SpacesAllocator<unsigned, hlsl::DxilSampler> m_reservedSamplerRegisters; SpacesAllocator<unsigned, hlsl::DxilResource> m_reservedUAVRegisters; SpacesAllocator<unsigned, hlsl::DxilResource> m_reservedSRVRegisters; template <typename T> static void GatherReservedRegisters(const std::vector<std::unique_ptr<T>> &ResourceList, SpacesAllocator<unsigned, T> &SAlloc) { for (auto &res : ResourceList) { if (res->IsAllocated()) { typename SpacesAllocator<unsigned, T>::Allocator &Alloc = SAlloc.Get(res->GetSpaceID()); Alloc.ForceInsertAndClobber(res.get(), res->GetLowerBound(), res->GetUpperBound()); if (res->IsUnbounded()) Alloc.SetUnbounded(res.get()); } } } template <typename T> static bool AllocateRegisters(LLVMContext &Ctx, const std::vector<std::unique_ptr<T>> &resourceList, SpacesAllocator<unsigned, T> &ReservedRegisters, unsigned AutoBindingSpace) { bool bChanged = false; SpacesAllocator<unsigned, T> SAlloc; // Reserve explicitly allocated resources for (auto &res : resourceList) { const unsigned space = res->GetSpaceID(); typename SpacesAllocator<unsigned, T>::Allocator &alloc = SAlloc.Get(space); typename SpacesAllocator<unsigned, T>::Allocator &reservedAlloc = ReservedRegisters.Get(space); if (res->IsAllocated()) { const unsigned reg = res->GetLowerBound(); const T *conflict = nullptr; if (res->IsUnbounded()) { const T *unbounded = alloc.GetUnbounded(); if (unbounded) { dxilutil::EmitErrorOnGlobalVariable( Ctx, dyn_cast<GlobalVariable>(res->GetGlobalSymbol()), Twine("more than one unbounded resource (") + unbounded->GetGlobalName() + (" and ") + res->GetGlobalName() + (") in space ") + Twine(space)); } else { conflict = alloc.Insert(res.get(), reg, res->GetUpperBound()); if (!conflict) { alloc.SetUnbounded(res.get()); reservedAlloc.SetUnbounded(res.get()); } } } else { conflict = alloc.Insert(res.get(), reg, res->GetUpperBound()); } if (conflict) { dxilutil::EmitErrorOnGlobalVariable( Ctx, dyn_cast<GlobalVariable>(res->GetGlobalSymbol()), ((res->IsUnbounded()) ? Twine("unbounded ") : Twine("")) + Twine("resource ") + res->GetGlobalName() + Twine(" at register ") + Twine(reg) + Twine(" overlaps with resource ") + conflict->GetGlobalName() + Twine(" at register ") + Twine(conflict->GetLowerBound()) + Twine(", space ") + Twine(space)); } else { // Also add this to the reserved (unallocatable) range, if it wasn't // already there. 
reservedAlloc.ForceInsertAndClobber(res.get(), res->GetLowerBound(), res->GetUpperBound()); } } } // Allocate unallocated resources for (auto &res : resourceList) { if (res->IsAllocated()) continue; unsigned space = res->GetSpaceID(); if (space == UINT_MAX) space = AutoBindingSpace; typename SpacesAllocator<unsigned, T>::Allocator &alloc = SAlloc.Get(space); typename SpacesAllocator<unsigned, T>::Allocator &reservedAlloc = ReservedRegisters.Get(space); unsigned reg = 0; unsigned end = 0; bool allocateSpaceFound = false; if (res->IsUnbounded()) { if (alloc.GetUnbounded() != nullptr) { const T *unbounded = alloc.GetUnbounded(); dxilutil::EmitErrorOnGlobalVariable( Ctx, dyn_cast<GlobalVariable>(res->GetGlobalSymbol()), Twine("more than one unbounded resource (") + unbounded->GetGlobalName() + Twine(" and ") + res->GetGlobalName() + Twine(") in space ") + Twine(space)); continue; } if (reservedAlloc.FindForUnbounded(reg)) { end = UINT_MAX; allocateSpaceFound = true; } } else if (reservedAlloc.Find(res->GetRangeSize(), reg)) { end = reg + res->GetRangeSize() - 1; allocateSpaceFound = true; } if (allocateSpaceFound) { bool success = reservedAlloc.Insert(res.get(), reg, end) == nullptr; DXASSERT_NOMSG(success); success = alloc.Insert(res.get(), reg, end) == nullptr; DXASSERT_NOMSG(success); if (res->IsUnbounded()) { alloc.SetUnbounded(res.get()); reservedAlloc.SetUnbounded(res.get()); } res->SetLowerBound(reg); res->SetSpaceID(space); bChanged = true; } else { dxilutil::EmitErrorOnGlobalVariable( Ctx, dyn_cast<GlobalVariable>(res->GetGlobalSymbol()), ((res->IsUnbounded()) ? Twine("unbounded ") : Twine("")) + Twine("resource ") + res->GetGlobalName() + Twine(" could not be allocated")); } } return bChanged; } public: void GatherReservedRegisters(DxilModule &DM) { // For backcompat with FXC, shader models 5.0 and below will not // auto-allocate resources at a register explicitly assigned to even an // unused resource. if (DM.GetLegacyResourceReservation()) { GatherReservedRegisters(DM.GetCBuffers(), m_reservedCBufferRegisters); GatherReservedRegisters(DM.GetSamplers(), m_reservedSamplerRegisters); GatherReservedRegisters(DM.GetUAVs(), m_reservedUAVRegisters); GatherReservedRegisters(DM.GetSRVs(), m_reservedSRVRegisters); } } bool AllocateRegisters(DxilModule &DM) { uint32_t AutoBindingSpace = DM.GetAutoBindingSpace(); if (AutoBindingSpace == UINT_MAX) { // For libraries, we don't allocate unless AutoBindingSpace is set. if (DM.GetShaderModel()->IsLib()) return false; // For shaders, we allocate in space 0 by default.
AutoBindingSpace = 0; } bool bChanged = false; bChanged |= AllocateRegisters(DM.GetCtx(), DM.GetCBuffers(), m_reservedCBufferRegisters, AutoBindingSpace); bChanged |= AllocateRegisters(DM.GetCtx(), DM.GetSamplers(), m_reservedSamplerRegisters, AutoBindingSpace); bChanged |= AllocateRegisters(DM.GetCtx(), DM.GetUAVs(), m_reservedUAVRegisters, AutoBindingSpace); bChanged |= AllocateRegisters(DM.GetCtx(), DM.GetSRVs(), m_reservedSRVRegisters, AutoBindingSpace); return bChanged; } }; bool llvm::AreDxilResourcesDense(llvm::Module *M, hlsl::DxilResourceBase **ppNonDense) { DxilModule &DM = M->GetOrCreateDxilModule(); RemapEntryCollection rewrites; if (BuildRewriteMap(rewrites, DM)) { *ppNonDense = rewrites.begin()->second.Resource; return false; } else { *ppNonDense = nullptr; return true; } } static bool GetConstantLegalGepForSplitAlloca(GetElementPtrInst *gep, DxilValueCache *DVC, int64_t *ret) { if (gep->getNumIndices() != 2) { return false; } if (ConstantInt *Index0 = dyn_cast<ConstantInt>(gep->getOperand(1))) { if (Index0->getLimitedValue() != 0) { return false; } } else { return false; } if (ConstantInt *C = DVC->GetConstInt(gep->getOperand(2))) { int64_t index = C->getSExtValue(); *ret = index; return true; } return false; } static bool LegalizeResourceArrays(Module &M, DxilValueCache *DVC) { SmallVector<AllocaInst *, 16> Allocas; bool Changed = false; // Find all allocas for (Function &F : M) { if (F.empty()) continue; BasicBlock &BB = F.getEntryBlock(); for (Instruction &I : BB) { if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { Type *ty = AI->getAllocatedType(); // Only handle single-dimensional arrays. Since this pass runs after // MultiDimArrayToOneDimArray, it should handle all arrays. if (ty->isArrayTy() && hlsl::dxilutil::IsHLSLResourceType(ty->getArrayElementType())) Allocas.push_back(AI); } } } SmallVector<AllocaInst *, 16> ScalarAllocas; std::unordered_map<GetElementPtrInst *, int64_t> ConstIndices; for (AllocaInst *AI : Allocas) { Type *ty = AI->getAllocatedType(); Type *resType = ty->getArrayElementType(); ScalarAllocas.clear(); ConstIndices.clear(); bool SplitAlloca = true; for (User *U : AI->users()) { if (GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(U)) { int64_t index = 0; if (!GetConstantLegalGepForSplitAlloca(gep, DVC, &index)) { SplitAlloca = false; break; } // Out of bounds. Out-of-bounds GEPs will trigger an error later. 
if (index < 0 || index >= (int64_t)ty->getArrayNumElements()) { SplitAlloca = false; Changed = true; dxilutil::EmitErrorOnInstruction( gep, "Accessing resource array with out-of-bounds index."); } ConstIndices[gep] = index; } else { SplitAlloca = false; break; } } if (SplitAlloca) { IRBuilder<> B(AI); ScalarAllocas.resize(ty->getArrayNumElements()); for (auto it = AI->user_begin(), end = AI->user_end(); it != end;) { GetElementPtrInst *gep = cast<GetElementPtrInst>(*(it++)); assert(ConstIndices.count(gep)); int64_t idx = ConstIndices[gep]; AllocaInst *ScalarAI = ScalarAllocas[idx]; if (!ScalarAI) { ScalarAI = B.CreateAlloca(resType); ScalarAllocas[idx] = ScalarAI; } gep->replaceAllUsesWith(ScalarAI); gep->eraseFromParent(); } AI->eraseFromParent(); Changed = true; } } return Changed; } typedef std::unordered_map<std::string, DxilResourceBase *> ResourceMap; template <typename T> static inline void GatherResources(const std::vector<std::unique_ptr<T>> &List, ResourceMap *Map) { for (const std::unique_ptr<T> &ptr : List) { (*Map)[ptr->GetGlobalName()] = ptr.get(); } } static bool LegalizeResources(Module &M, DxilValueCache *DVC) { bool Changed = false; Changed |= LegalizeResourceArrays(M, DVC); // Simple pass to collect resource PHI's SmallVector<PHINode *, 8> PHIs; for (Function &F : M) { for (BasicBlock &BB : F) { for (Instruction &I : BB) { if (PHINode *PN = dyn_cast<PHINode>(&I)) { if (hlsl::dxilutil::IsHLSLResourceType(PN->getType())) { PHIs.push_back(PN); } } else { break; } } } } SmallVector<Instruction *, 8> DCEWorklist; // Try to simplify those PHI's with DVC and collect them in DCEWorklist for (unsigned Attempt = 0, MaxAttempt = PHIs.size(); Attempt < MaxAttempt; Attempt++) { bool LocalChanged = false; for (unsigned i = 0; i < PHIs.size();) { PHINode *PN = PHIs[i]; if (Value *V = DVC->GetValue(PN)) { PN->replaceAllUsesWith(V); LocalChanged = true; DCEWorklist.push_back(PN); PHIs.erase(PHIs.begin() + i); } else { i++; } } Changed |= LocalChanged; if (!LocalChanged) break; } // Collect Resource GV loads for (GlobalVariable &GV : M.globals()) { Type *Ty = GV.getType()->getPointerElementType(); while (Ty->isArrayTy()) Ty = Ty->getArrayElementType(); if (!hlsl::dxilutil::IsHLSLResourceType(Ty)) continue; SmallVector<User *, 4> WorkList(GV.user_begin(), GV.user_end()); while (WorkList.size()) { User *U = WorkList.pop_back_val(); if (LoadInst *Load = dyn_cast<LoadInst>(U)) { DCEWorklist.push_back(Load); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { for (User *GepU : GEP->users()) WorkList.push_back(GepU); } } } // Simple DCE while (DCEWorklist.size()) { Instruction *I = DCEWorklist.back(); DCEWorklist.pop_back(); if (llvm::isInstructionTriviallyDead(I)) { for (Use &Op : I->operands()) if (Instruction *OpI = dyn_cast<Instruction>(Op.get())) DCEWorklist.push_back(OpI); I->eraseFromParent(); // Remove the instruction from the worklist if it still exists in it. 
DCEWorklist.erase(std::remove(DCEWorklist.begin(), DCEWorklist.end(), I), DCEWorklist.end()); Changed = true; } } return Changed; } namespace { class DxilLowerCreateHandleForLib : public ModulePass { private: RemapEntryCollection m_rewrites; DxilModule *m_DM; bool m_HasDbgInfo; bool m_bIsLib; bool m_bLegalizationFailed; public: static char ID; // Pass identification, replacement for typeid explicit DxilLowerCreateHandleForLib() : ModulePass(ID) {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DxilValueCache>(); } StringRef getPassName() const override { return "DXIL Lower createHandleForLib"; } bool runOnModule(Module &M) override { DxilModule &DM = M.GetOrCreateDxilModule(); m_DM = &DM; // Clear llvm used to remove unused resource. m_DM->ClearLLVMUsed(); m_bIsLib = DM.GetShaderModel()->IsLib(); m_bLegalizationFailed = false; FailOnPoisonResources(); bool bChanged = false; if (DM.GetShaderModel()->IsSM66Plus()) { bChanged = PatchDynamicTBuffers(DM); SetNonUniformIndexForDynamicResource(DM); } unsigned numResources = DM.GetCBuffers().size() + DM.GetUAVs().size() + DM.GetSRVs().size() + DM.GetSamplers().size(); if (!numResources) { // Remove createHandleFromHandle when not a lib if (!m_bIsLib) RemoveCreateHandleFromHandle(DM); return false; } // Switch tbuffers to SRVs, as they have been treated as cbuffers up to this // point. if (DM.GetCBuffers().size()) bChanged |= PatchTBuffers(DM); // Assign resource binding overrides. hlsl::ApplyBindingTableFromMetadata(DM); // Gather reserved resource registers while we still have // unused resources that might have explicit register assignments. DxilResourceRegisterAllocator ResourceRegisterAllocator; ResourceRegisterAllocator.GatherReservedRegisters(DM); // Remove unused resources. DM.RemoveResourcesWithUnusedSymbols(); unsigned newResources = DM.GetCBuffers().size() + DM.GetUAVs().size() + DM.GetSRVs().size() + DM.GetSamplers().size(); bChanged = bChanged || (numResources != newResources); if (0 == newResources) return bChanged; { DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); bool bLocalChanged = LegalizeResources(M, DVC); if (bLocalChanged) { // Remove unused resources. DM.RemoveResourcesWithUnusedSymbols(); } bChanged |= bLocalChanged; } bChanged |= ResourceRegisterAllocator.AllocateRegisters(DM); // Fill in top-level CBuffer variable usage bit UpdateCBufferUsage(); if (m_bIsLib && DM.GetShaderModel()->GetMinor() == ShaderModel::kOfflineMinor) return bChanged; // Make sure no select on resource. bChanged |= RemovePhiOnResource(); if (m_bLegalizationFailed) return bChanged; if (m_bIsLib) { if (DM.GetOP()->UseMinPrecision()) bChanged |= UpdateStructTypeForLegacyLayout(); return bChanged; } bChanged = true; // Load up debug information, to cross-reference values and the instructions // used to load them. m_HasDbgInfo = llvm::getDebugMetadataVersionFromModule(M) != 0; GenerateDxilResourceHandles(); if (DM.GetOP()->UseMinPrecision()) UpdateStructTypeForLegacyLayout(); // Change resource symbol into undef. UpdateResourceSymbols(); // Remove createHandleFromHandle when not a lib. RemoveCreateHandleFromHandle(DM); // Remove unused createHandleForLib functions. 
dxilutil::RemoveUnusedFunctions(M, DM.GetEntryFunction(), DM.GetPatchConstantFunction(), m_bIsLib); // Erase type annotations for structures no longer used DM.GetTypeSystem().EraseUnusedStructAnnotations(); return bChanged; } private: void FailOnPoisonResources(); bool RemovePhiOnResource(); void UpdateResourceSymbols(); void ReplaceResourceUserWithHandle(DxilResource &res, LoadInst *load, Instruction *handle); void TranslateDxilResourceUses(DxilResourceBase &res); void GenerateDxilResourceHandles(); bool UpdateStructTypeForLegacyLayout(); // Switch CBuffer for SRV for TBuffers. bool PatchDynamicTBuffers(DxilModule &DM); bool PatchTBuffers(DxilModule &DM); void PatchTBufferUse(Value *V, DxilModule &DM, DenseSet<Value *> &patchedSet); void UpdateCBufferUsage(); void SetNonUniformIndexForDynamicResource(DxilModule &DM); void RemoveCreateHandleFromHandle(DxilModule &DM); }; } // namespace // Phi on resource. namespace { typedef std::unordered_map<Value *, Value *> ValueToValueMap; typedef llvm::SetVector<Value *> ValueSetVector; typedef llvm::SmallVector<Value *, 4> IndexVector; typedef std::unordered_map<Value *, IndexVector> ValueToIdxMap; //#define SUPPORT_SELECT_ON_ALLOCA // Errors: class ResourceUseErrors { bool m_bErrorsReported; public: ResourceUseErrors() : m_bErrorsReported(false) {} enum ErrorCode { // Collision between use of one resource GV and another. // All uses must be guaranteed to resolve to only one GV. // Additionally, when writing resource to alloca, all uses // of that alloca are considered resolving to a single GV. GVConflicts, // static global resources are disallowed for libraries at this time. // for non-library targets, they should have been eliminated already. StaticGVUsed, // user function calls with resource params or return type are // currently disallowed for libraries. UserCallsWithResources, // When searching up from store pointer looking for alloca, // we encountered an unexpected value type UnexpectedValuesFromStorePointer, // Without SUPPORT_SELECT_ON_ALLOCA, phi/select on alloca based // pointer is disallowed, since this scenario is still untested. // This error also covers any other unknown alloca pointer uses. // Supported: // alloca (-> gep)? -> load -> ... // alloca (-> gep)? -> store. // Unsupported without SUPPORT_SELECT_ON_ALLOCA: // alloca (-> gep)? -> phi/select -> ... AllocaUserDisallowed, MismatchHandleAnnotation, MixDynamicResourceWithBindingResource, MismatchIsSampler, #ifdef SUPPORT_SELECT_ON_ALLOCA // Conflict in select/phi between GV pointer and alloca pointer. This // algorithm can't handle this case. AllocaSelectConflict, #endif ErrorCodeCount }; const StringRef ErrorText[ErrorCodeCount] = { "local resource not guaranteed to map to unique global resource.", "static global resource use is disallowed for library functions.", "exported library functions cannot have resource parameters or return " "value.", "internal error: unexpected instruction type when looking for alloca " "from store.", "phi/select disallowed on pointers to local resources.", "mismatch handle annotation", "possible mixing dynamic resource and binding resource", "merging sampler handle and resource handle" #ifdef SUPPORT_SELECT_ON_ALLOCA , "unable to resolve merge of global and local resource pointers." #endif }; ValueSetVector ErrorSets[ErrorCodeCount]; // Ultimately, the goal of ErrorUsers is to mark all create handles // so we don't try to report errors on them again later. 
std::unordered_set<Value *> ErrorUsers; // users of error values bool AddErrorUsers(Value *V) { auto it = ErrorUsers.insert(V); if (!it.second) return false; // already there if (isa<GEPOperator>(V) || isa<LoadInst>(V) || isa<PHINode>(V) || isa<SelectInst>(V) || isa<AllocaInst>(V)) { for (auto U : V->users()) { AddErrorUsers(U); } } else if (isa<StoreInst>(V)) { AddErrorUsers(cast<StoreInst>(V)->getPointerOperand()); } // create handle will be marked, but users not followed return true; } void ReportError(ErrorCode ec, Value *V) { DXASSERT_NOMSG(ec < ErrorCodeCount); if (!ErrorSets[ec].insert(V)) return; // Error already reported AddErrorUsers(V); m_bErrorsReported = true; if (Instruction *I = dyn_cast<Instruction>(V)) { dxilutil::EmitErrorOnInstruction(I, ErrorText[ec]); } else { StringRef Name = V->getName(); std::string escName; if (isa<Function>(V)) { llvm::raw_string_ostream os(escName); dxilutil::PrintEscapedString(Name, os); os.flush(); Name = escName; } V->getContext().emitError(Twine(ErrorText[ec]) + " Value: " + Name); } } bool ErrorsReported() { return m_bErrorsReported; } }; unsigned CountArrayDimensions(Type *Ty, // Optionally collect dimensions SmallVector<unsigned, 4> *dims = nullptr) { if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); unsigned dim = 0; if (dims) dims->clear(); while (Ty->isArrayTy()) { if (dims) dims->push_back(Ty->getArrayNumElements()); dim++; Ty = Ty->getArrayElementType(); } return dim; } // Delete unused CleanupInsts, restarting when changed // Return true if something was deleted bool CleanupUnusedValues(std::unordered_set<Instruction *> &CleanupInsts) { // - delete unused CleanupInsts, restarting when changed bool bAnyChanges = false; bool bChanged = false; do { bChanged = false; for (auto it = CleanupInsts.begin(); it != CleanupInsts.end();) { Instruction *I = *(it++); if (I->user_empty()) { // Add the instruction's operands to CleanupInsts for (unsigned iOp = 0; iOp < I->getNumOperands(); iOp++) { if (Instruction *opI = dyn_cast<Instruction>(I->getOperand(iOp))) CleanupInsts.insert(opI); } I->eraseFromParent(); CleanupInsts.erase(I); bChanged = true; } } if (bChanged) bAnyChanges = true; } while (bChanged); return bAnyChanges; } // Helper class for legalizing resource use // Convert select/phi on resources to select/phi on index to GEP on GV. // Convert resource alloca to index alloca. 
// Assumes createHandleForLib has no select/phi class LegalizeResourceUseHelper { // Change: // gep1 = GEP gRes, i1 // res1 = load gep1 // gep2 = GEP gRes, i2 // gep3 = GEP gRes, i3 // gep4 = phi gep2, gep3 <-- handle select/phi on GEP // res4 = load gep4 // res5 = phi res1, res4 // res6 = load GEP gRes, 23 <-- handle constant GepExpression // res = select cnd2, res5, res6 // handle = createHandleForLib(res) // To: // i4 = phi i2, i3 // i5 = phi i1, i4 // i6 = select cnd, i5, 23 // gep = GEP gRes, i6 // res = load gep // handle = createHandleForLib(res) // Also handles alloca // resArray = alloca [2 x Resource] // gep1 = GEP gRes, i1 // res1 = load gep1 // gep2 = GEP gRes, i2 // gep3 = GEP gRes, i3 // phi4 = phi gep2, gep3 // res4 = load phi4 // gep5 = GEP resArray, 0 // gep6 = GEP resArray, 1 // store gep5, res1 // store gep6, res4 // gep7 = GEP resArray, i7 <-- dynamically index array // res = load gep7 // handle = createHandleForLib(res) // Desired result: // idxArray = alloca [2 x i32] // phi4 = phi i2, i3 // gep5 = GEP idxArray, 0 // gep6 = GEP idxArray, 1 // store gep5, i1 // store gep6, phi4 // gep7 = GEP idxArray, i7 // gep8 = GEP gRes, gep7 // res = load gep8 // handle = createHandleForLib(res) // Also handles multi-dim resource index and multi-dim resource array allocas // Basic algorithm: // - recursively mark each GV user with GV (ValueToResourceGV) // - verify only one GV used for any given value // - handle allocas by searching up from store for alloca // - then recursively mark alloca users // - ResToIdxReplacement keeps track of vector of indices that // will be used to replace a given resource value or pointer // - Next, create selects/phis for indices corresponding to // selects/phis on resource pointers or values. // - leave incoming index values undef for now // - Create index allocas to replace resource allocas // - Create GEPs on index allocas to replace GEPs on resource allocas // - Create index loads on index allocas to replace loads on resource alloca // GEP // - Fill in replacements for GEPs on resource GVs // - copy replacement index vectors to corresponding loads // - Create index stores to replace resource stores to alloca/GEPs // - Update selects/phis incoming index values // - SimplifyMerges: replace index phis/selects on same value with that value // - RemappedValues[phi/select] set to replacement value // - use LookupValue from now on when reading from ResToIdxReplacement // - Update handles by replacing load/GEP chains that go through select/phi // with direct GV GEP + load, with select/phi on GEP indices instead. 
public: ResourceUseErrors m_Errors; ValueToValueMap ValueToResourceGV; ValueToIdxMap ResToIdxReplacement; // Value sets we can use to iterate ValueSetVector Selects, GEPs, Stores, Handles; ValueSetVector Allocas, AllocaGEPs, AllocaLoads; #ifdef SUPPORT_SELECT_ON_ALLOCA ValueSetVector AllocaSelects; #endif std::unordered_set<Value *> NonUniformSet; // New index selects created by pass, so we can try simplifying later ValueSetVector NewSelects; // Values that have been replaced with other values need remapping ValueToValueMap RemappedValues; // Things to clean up if no users: std::unordered_set<Instruction *> CleanupInsts; GlobalVariable *LookupResourceGV(Value *V) { auto itGV = ValueToResourceGV.find(V); if (itGV == ValueToResourceGV.end()) return nullptr; return cast<GlobalVariable>(itGV->second); } // Follow RemappedValues, return input if not remapped Value *LookupValue(Value *V) { auto it = RemappedValues.find(V); SmallPtrSet<Value *, 4> visited; while (it != RemappedValues.end()) { // Cycles should not happen, but are bad if they do. if (visited.count(it->second)) { // When remapping values to be replaced, we add them to RemappedValues // so we don't use dead values stored in other sets/maps. Circular // remaps should not happen. DXASSERT(false, "otherwise, circular remapping"); llvm_unreachable("cycles detected in value remapping"); break; } V = it->second; it = RemappedValues.find(V); if (it != RemappedValues.end()) visited.insert(V); } return V; } bool AreLoadUsersTrivial(LoadInst *LI) { for (auto U : LI->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { Function *F = CI->getCalledFunction(); DxilModule &DM = F->getParent()->GetDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); if (hlslOP->IsDxilOpFunc(F)) { hlsl::OP::OpCodeClass opClass; if (hlslOP->GetOpCodeClass(F, opClass) && opClass == DXIL::OpCodeClass::CreateHandleForLib) { continue; } } } return false; } return true; } // This is used to quickly skip the common case where no work is needed bool AreGEPUsersTrivial(GEPOperator *GEP) { if (GlobalVariable *GV = LookupResourceGV(GEP)) { if (GEP->getPointerOperand() != LookupResourceGV(GEP)) return false; } for (auto U : GEP->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (AreLoadUsersTrivial(LI)) continue; } return false; } return true; } // AssignResourceGVFromStore is used on pointer being stored to. 
// Follow GEP/Phi/Select up to Alloca, then CollectResourceGVUsers on Alloca void AssignResourceGVFromStore(GlobalVariable *GV, Value *V, SmallPtrSet<Value *, 4> &visited, bool bNonUniform) { // Prevent cycles as we search up if (visited.count(V) != 0) return; visited.insert(V); // record V so the cycle guard above can fire // Verify and skip if already processed auto it = ValueToResourceGV.find(V); if (it != ValueToResourceGV.end()) { if (it->second != GV) { m_Errors.ReportError(ResourceUseErrors::GVConflicts, V); } return; } if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { CollectResourceGVUsers(GV, AI, /*bAlloca*/ true, bNonUniform); return; } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { // follow the pointer up AssignResourceGVFromStore(GV, GEP->getPointerOperand(), visited, bNonUniform); return; } else if (PHINode *Phi = dyn_cast<PHINode>(V)) { #ifdef SUPPORT_SELECT_ON_ALLOCA // follow all incoming values for (auto it : Phi->operand_values()) AssignResourceGVFromStore(GV, it, visited, bNonUniform); #else m_Errors.ReportError(ResourceUseErrors::AllocaUserDisallowed, V); #endif return; } else if (SelectInst *Sel = dyn_cast<SelectInst>(V)) { #ifdef SUPPORT_SELECT_ON_ALLOCA // follow all incoming values AssignResourceGVFromStore(GV, Sel->getTrueValue(), visited, bNonUniform); AssignResourceGVFromStore(GV, Sel->getFalseValue(), visited, bNonUniform); #else m_Errors.ReportError(ResourceUseErrors::AllocaUserDisallowed, V); #endif return; } else if (isa<GlobalVariable>(V) && cast<GlobalVariable>(V)->getLinkage() == GlobalVariable::LinkageTypes::InternalLinkage) { // this is writing to global static, which is disallowed at this point. m_Errors.ReportError(ResourceUseErrors::StaticGVUsed, V); return; } else { // Most likely storing to output parameter m_Errors.ReportError(ResourceUseErrors::UserCallsWithResources, V); return; } return; } // Recursively mark values with GV, following users. // Starting value V should be GV itself. // Reports a GVConflicts error if a value or its uses resolve to more than // one GV. void CollectResourceGVUsers(GlobalVariable *GV, Value *V, bool bAlloca = false, bool bNonUniform = false) { // Recursively tag value V and its users as using GV. 
auto it = ValueToResourceGV.find(V); if (it != ValueToResourceGV.end()) { if (it->second != GV) { m_Errors.ReportError(ResourceUseErrors::GVConflicts, V); #ifdef SUPPORT_SELECT_ON_ALLOCA } else { // if select/phi, make sure bAlloca is consistent if (isa<PHINode>(V) || isa<SelectInst>(V)) if ((bAlloca && AllocaSelects.count(V) == 0) || (!bAlloca && Selects.count(V) == 0)) m_Errors.ReportError(ResourceUseErrors::AllocaSelectConflict, V); #endif } return; } ValueToResourceGV[V] = GV; if (GV == V) { // Just add and recurse users // make sure bAlloca is clear for users bAlloca = false; } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { if (bAlloca) AllocaGEPs.insert(GEP); else if (!AreGEPUsersTrivial(GEP)) GEPs.insert(GEP); else return; // Optimization: skip trivial GV->GEP->load->createHandle if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP)) { if (DxilMDHelper::IsMarkedNonUniform(GEPInst)) bNonUniform = true; } } else if (LoadInst *LI = dyn_cast<LoadInst>(V)) { if (bAlloca) AllocaLoads.insert(LI); // clear bAlloca for users bAlloca = false; if (bNonUniform) NonUniformSet.insert(LI); } else if (StoreInst *SI = dyn_cast<StoreInst>(V)) { Stores.insert(SI); if (!bAlloca) { // Find and mark allocas this store could be storing to SmallPtrSet<Value *, 4> visited; AssignResourceGVFromStore(GV, SI->getPointerOperand(), visited, bNonUniform); } return; } else if (PHINode *Phi = dyn_cast<PHINode>(V)) { if (bAlloca) { #ifdef SUPPORT_SELECT_ON_ALLOCA AllocaSelects.insert(Phi); #else m_Errors.ReportError(ResourceUseErrors::AllocaUserDisallowed, V); #endif } else { Selects.insert(Phi); } } else if (SelectInst *Sel = dyn_cast<SelectInst>(V)) { if (bAlloca) { #ifdef SUPPORT_SELECT_ON_ALLOCA AllocaSelects.insert(Sel); #else m_Errors.ReportError(ResourceUseErrors::AllocaUserDisallowed, V); #endif } else { Selects.insert(Sel); } } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { Allocas.insert(AI); // set bAlloca for users bAlloca = true; } else if (Constant *C = dyn_cast<Constant>(V)) { // skip @llvm.used entry return; } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(V)) { DXASSERT(onlyUsedByLifetimeMarkers(BCI), "expected bitcast to only be used by lifetime intrinsics"); return; } else if (bAlloca) { m_Errors.ReportError(ResourceUseErrors::AllocaUserDisallowed, V); } else { // Must be createHandleForLib or user function call. CallInst *CI = cast<CallInst>(V); Function *F = CI->getCalledFunction(); DxilModule &DM = GV->getParent()->GetDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); if (hlslOP->IsDxilOpFunc(F)) { hlsl::OP::OpCodeClass opClass; if (hlslOP->GetOpCodeClass(F, opClass) && (opClass == DXIL::OpCodeClass::CreateHandleForLib)) { Handles.insert(CI); if (bNonUniform) NonUniformSet.insert(CI); return; } } // This could be user call with resource param, which is disallowed for // lib_6_3 m_Errors.ReportError(ResourceUseErrors::UserCallsWithResources, V); return; } // Recurse users for (auto U : V->users()) CollectResourceGVUsers(GV, U, bAlloca, bNonUniform); return; } // Remove conflicting values from sets before // transforming the remainder. 
void RemoveConflictingValue(Value *V) { bool bRemoved = false; if (isa<GEPOperator>(V)) { bRemoved = GEPs.remove(V) || AllocaGEPs.remove(V); } else if (isa<LoadInst>(V)) { bRemoved = AllocaLoads.remove(V); } else if (isa<StoreInst>(V)) { bRemoved = Stores.remove(V); } else if (isa<PHINode>(V) || isa<SelectInst>(V)) { bRemoved = Selects.remove(V); #ifdef SUPPORT_SELECT_ON_ALLOCA bRemoved |= AllocaSelects.remove(V); #endif } else if (isa<AllocaInst>(V)) { bRemoved = Allocas.remove(V); } else if (isa<CallInst>(V)) { bRemoved = Handles.remove(V); return; // don't recurse } if (bRemoved) { // Recurse users for (auto U : V->users()) RemoveConflictingValue(U); } } void RemoveConflicts() { for (auto V : m_Errors.ErrorSets[ResourceUseErrors::GVConflicts]) { RemoveConflictingValue(V); ValueToResourceGV.erase(V); } } void CreateSelects() { if (Selects.empty() #ifdef SUPPORT_SELECT_ON_ALLOCA && AllocaSelects.empty() #endif ) return; LLVMContext &Ctx = #ifdef SUPPORT_SELECT_ON_ALLOCA Selects.empty() ? AllocaSelects[0]->getContext() : #endif Selects[0]->getContext(); Type *i32Ty = IntegerType::getInt32Ty(Ctx); #ifdef SUPPORT_SELECT_ON_ALLOCA for (auto &SelectSet : {Selects, AllocaSelects}) { bool bAlloca = !(&SelectSet == &Selects); #else for (auto &SelectSet : {Selects}) { #endif for (auto pValue : SelectSet) { Type *SelectTy = i32Ty; #ifdef SUPPORT_SELECT_ON_ALLOCA // For alloca case, type needs to match dimensionality of incoming value if (bAlloca) { // TODO: Not sure if this case will actually work // (or whether it can even be generated from HLSL) Type *Ty = pValue->getType(); SmallVector<unsigned, 4> dims; unsigned dim = CountArrayDimensions(Ty, &dims); for (unsigned i = 0; i < dim; i++) SelectTy = ArrayType::get(SelectTy, (uint64_t)dims[dim - i - 1]); if (Ty->isPointerTy()) SelectTy = PointerType::get(SelectTy, 0); } #endif Value *UndefValue = UndefValue::get(SelectTy); if (PHINode *Phi = dyn_cast<PHINode>(pValue)) { GlobalVariable *GV = LookupResourceGV(Phi); if (!GV) continue; // skip value removed due to conflict IRBuilder<> PhiBuilder(Phi); unsigned gvDim = CountArrayDimensions(GV->getType()); IndexVector &idxVector = ResToIdxReplacement[Phi]; idxVector.resize(gvDim, nullptr); unsigned numIncoming = Phi->getNumIncomingValues(); for (unsigned i = 0; i < gvDim; i++) { PHINode *newPhi = PhiBuilder.CreatePHI(SelectTy, numIncoming); NewSelects.insert(newPhi); idxVector[i] = newPhi; for (unsigned j = 0; j < numIncoming; j++) { // Set incoming values to undef until next pass newPhi->addIncoming(UndefValue, Phi->getIncomingBlock(j)); } } } else if (SelectInst *Sel = dyn_cast<SelectInst>(pValue)) { GlobalVariable *GV = LookupResourceGV(Sel); if (!GV) continue; // skip value removed due to conflict IRBuilder<> Builder(Sel); unsigned gvDim = CountArrayDimensions(GV->getType()); IndexVector &idxVector = ResToIdxReplacement[Sel]; idxVector.resize(gvDim, nullptr); for (unsigned i = 0; i < gvDim; i++) { Value *newSel = Builder.CreateSelect(Sel->getCondition(), UndefValue, UndefValue); NewSelects.insert(newSel); idxVector[i] = newSel; } } else { DXASSERT(false, "otherwise, non-select/phi in Selects set"); } } } } // Create index allocas to replace resource allocas void CreateIndexAllocas() { if (Allocas.empty()) return; Type *i32Ty = IntegerType::getInt32Ty(Allocas[0]->getContext()); for (auto pValue : Allocas) { AllocaInst *pAlloca = cast<AllocaInst>(pValue); GlobalVariable *GV = LookupResourceGV(pAlloca); if (!GV) continue; // skip value removed due to conflict IRBuilder<> AllocaBuilder(pAlloca); unsigned 
gvDim = CountArrayDimensions(GV->getType()); SmallVector<unsigned, 4> dimVector; unsigned allocaTyDim = CountArrayDimensions(pAlloca->getType(), &dimVector); Type *pIndexType = i32Ty; for (unsigned i = 0; i < allocaTyDim; i++) { pIndexType = ArrayType::get(pIndexType, dimVector[allocaTyDim - i - 1]); } Value *arraySize = pAlloca->getArraySize(); IndexVector &idxVector = ResToIdxReplacement[pAlloca]; idxVector.resize(gvDim, nullptr); for (unsigned i = 0; i < gvDim; i++) { AllocaInst *pAlloca = AllocaBuilder.CreateAlloca(pIndexType, arraySize); pAlloca->setAlignment(4); idxVector[i] = pAlloca; } } } // Add corresponding GEPs for index allocas IndexVector &ReplaceAllocaGEP(GetElementPtrInst *GEP) { IndexVector &idxVector = ResToIdxReplacement[GEP]; if (!idxVector.empty()) return idxVector; Value *Ptr = GEP->getPointerOperand(); // Recurse for partial GEPs IndexVector &ptrIndices = isa<GetElementPtrInst>(Ptr) ? ReplaceAllocaGEP(cast<GetElementPtrInst>(Ptr)) : ResToIdxReplacement[Ptr]; IRBuilder<> Builder(GEP); SmallVector<Value *, 4> gepIndices; for (auto it = GEP->idx_begin(), idxEnd = GEP->idx_end(); it != idxEnd; it++) gepIndices.push_back(*it); idxVector.resize(ptrIndices.size(), nullptr); for (unsigned i = 0; i < ptrIndices.size(); i++) { idxVector[i] = Builder.CreateInBoundsGEP(ptrIndices[i], gepIndices); } return idxVector; } void ReplaceAllocaGEPs() { for (auto V : AllocaGEPs) { ReplaceAllocaGEP(cast<GetElementPtrInst>(V)); } } void ReplaceAllocaLoads() { for (auto V : AllocaLoads) { LoadInst *LI = cast<LoadInst>(V); Value *Ptr = LI->getPointerOperand(); IRBuilder<> Builder(LI); IndexVector &idxVector = ResToIdxReplacement[V]; IndexVector &ptrIndices = ResToIdxReplacement[Ptr]; idxVector.resize(ptrIndices.size(), nullptr); for (unsigned i = 0; i < ptrIndices.size(); i++) { idxVector[i] = Builder.CreateLoad(ptrIndices[i]); } } } // Add GEP to ResToIdxReplacement with indices from incoming + GEP IndexVector &ReplaceGVGEPs(GEPOperator *GEP) { IndexVector &idxVector = ResToIdxReplacement[GEP]; // Skip if already done // (we recurse into partial GEP and iterate all GEPs) if (!idxVector.empty()) return idxVector; Type *i32Ty = IntegerType::getInt32Ty(GEP->getContext()); Constant *Zero = Constant::getIntegerValue(i32Ty, APInt(32, 0)); Value *Ptr = GEP->getPointerOperand(); unsigned idx = 0; if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { unsigned gvDim = CountArrayDimensions(GV->getType()); idxVector.resize(gvDim, Zero); } else if (isa<GEPOperator>(Ptr) || isa<PHINode>(Ptr) || isa<SelectInst>(Ptr)) { // Recurse for partial GEPs IndexVector &ptrIndices = isa<GEPOperator>(Ptr) ? 
ReplaceGVGEPs(cast<GEPOperator>(Ptr)) : ResToIdxReplacement[Ptr]; unsigned ptrDim = CountArrayDimensions(Ptr->getType()); unsigned gvDim = ptrIndices.size(); DXASSERT( ptrDim <= gvDim, "otherwise incoming pointer has more dimensions than associated GV"); unsigned gepStart = gvDim - ptrDim; // Copy indices and add ours idxVector.resize(ptrIndices.size(), Zero); for (; idx < gepStart; idx++) idxVector[idx] = ptrIndices[idx]; } if (GEP->hasIndices()) { auto itIdx = GEP->idx_begin(); ++itIdx; // Always skip leading zero (we don't support GV+n pointer arith) while (itIdx != GEP->idx_end()) idxVector[idx++] = *itIdx++; } return idxVector; } // Add GEPs to ResToIdxReplacement and update loads void ReplaceGVGEPs() { if (GEPs.empty()) return; for (auto V : GEPs) { GEPOperator *GEP = cast<GEPOperator>(V); IndexVector &gepVector = ReplaceGVGEPs(GEP); for (auto U : GEP->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { // Just copy incoming indices ResToIdxReplacement[LI] = gepVector; } } } } // Create new index stores for incoming indices void ReplaceStores() { // generate stores of incoming indices to corresponding index pointers if (Stores.empty()) return; Type *i32Ty = IntegerType::getInt32Ty(Stores[0]->getContext()); for (auto V : Stores) { StoreInst *SI = cast<StoreInst>(V); IRBuilder<> Builder(SI); IndexVector &idxVector = ResToIdxReplacement[SI]; Value *Ptr = SI->getPointerOperand(); Value *Val = SI->getValueOperand(); IndexVector &ptrIndices = ResToIdxReplacement[Ptr]; IndexVector &valIndices = ResToIdxReplacement[Val]; // If Val is not found, it is treated as an undef value that will // translate to an undef index, which may still be valid if it's never // used. Value *UndefIndex = valIndices.size() > 0 ? nullptr : UndefValue::get(i32Ty); DXASSERT_NOMSG(valIndices.size() == 0 || ptrIndices.size() == valIndices.size()); idxVector.resize(ptrIndices.size(), nullptr); for (unsigned i = 0; i < idxVector.size(); i++) { idxVector[i] = Builder.CreateStore( UndefIndex ? UndefIndex : valIndices[i], ptrIndices[i]); } } } // For each Phi/Select: update matching incoming values for new phis void UpdateSelects() { if (Selects.empty()) return; Type *i32Ty = IntegerType::getInt32Ty(Selects[0]->getContext()); for (auto V : Selects) { // update incoming index values corresponding to incoming resource values IndexVector &idxVector = ResToIdxReplacement[V]; Instruction *I = cast<Instruction>(V); unsigned numOperands = I->getNumOperands(); unsigned startOp = isa<PHINode>(V) ? 0 : 1; for (unsigned iOp = startOp; iOp < numOperands; iOp++) { Value *Val = I->getOperand(iOp); IndexVector &incomingIndices = ResToIdxReplacement[Val]; // If Val is not found, it is treated as an undef value that will // translate to an undef index, which may still be valid if it's never // used. Value *UndefIndex = incomingIndices.size() > 0 ? nullptr : UndefValue::get(i32Ty); DXASSERT_NOMSG(incomingIndices.size() == 0 || idxVector.size() == incomingIndices.size()); for (unsigned i = 0; i < idxVector.size(); i++) { // must be instruction (phi/select) Instruction *indexI = cast<Instruction>(idxVector[i]); indexI->setOperand(iOp, UndefIndex ? 
UndefIndex : incomingIndices[i]); } // Now clear incoming operand (adding to cleanup) to break cycles if (Instruction *OpI = dyn_cast<Instruction>(I->getOperand(iOp))) CleanupInsts.insert(OpI); I->setOperand(iOp, UndefValue::get(I->getType())); } } } // ReplaceHandles // - iterate handles // - insert GEP using new indices associated with resource value // - load resource from new GEP // - replace resource use in createHandleForLib with new load // Assumes: no users of handle are phi/select or store void ReplaceHandles() { if (Handles.empty()) return; Type *i32Ty = IntegerType::getInt32Ty(Handles[0]->getContext()); Constant *Zero = Constant::getIntegerValue(i32Ty, APInt(32, 0)); for (auto V : Handles) { CallInst *CI = cast<CallInst>(V); DxilInst_CreateHandleForLib createHandle(CI); Value *res = createHandle.get_Resource(); // Skip extra work if nothing between load and create handle if (LoadInst *LI = dyn_cast<LoadInst>(res)) { Value *Ptr = LI->getPointerOperand(); if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) Ptr = GEP->getPointerOperand(); if (isa<GlobalVariable>(Ptr)) continue; } GlobalVariable *GV = LookupResourceGV(res); if (!GV) continue; // skip value removed due to conflict IRBuilder<> Builder(CI); IndexVector &idxVector = ResToIdxReplacement[res]; DXASSERT(idxVector.size() == CountArrayDimensions(GV->getType()), "replacements empty or invalid"); SmallVector<Value *, 4> gepIndices; gepIndices.push_back(Zero); for (auto idxVal : idxVector) gepIndices.push_back(LookupValue(idxVal)); Value *GEP = Builder.CreateInBoundsGEP(GV, gepIndices); // Mark new GEP instruction non-uniform if necessary if (NonUniformSet.count(res) != 0 || NonUniformSet.count(CI) != 0) if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP)) DxilMDHelper::MarkNonUniform(GEPInst); LoadInst *LI = Builder.CreateLoad(GEP); createHandle.set_Resource(LI); if (Instruction *resI = dyn_cast<Instruction>(res)) CleanupInsts.insert(resI); } } void SimplifyMerges() { // Loop if changed bool bChanged = false; do { bChanged = false; for (auto V : NewSelects) { if (LookupValue(V) != V) continue; Instruction *I = cast<Instruction>(V); unsigned startOp = isa<PHINode>(I) ? 0 : 1; Value *newV = dxilutil::MergeSelectOnSameValue( cast<Instruction>(V), startOp, I->getNumOperands()); if (newV) { RemappedValues[V] = newV; bChanged = true; } } } while (bChanged); } void CleanupDeadInsts() { // Assuming everything was successful: // delete stores to allocas to remove cycles for (auto V : Stores) { StoreInst *SI = cast<StoreInst>(V); if (Instruction *I = dyn_cast<Instruction>(SI->getValueOperand())) CleanupInsts.insert(I); if (Instruction *I = dyn_cast<Instruction>(SI->getPointerOperand())) CleanupInsts.insert(I); SI->eraseFromParent(); } CleanupUnusedValues(CleanupInsts); } void VerifyComplete(DxilModule &DM) { // Check that all handles now resolve to a global variable, otherwise, // they are likely loading from resource function parameter, which // is disallowed. 
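    // Schematic of the only accepted shape after the transform (assumed for
    // illustration, not quoted from this source):
    //   %p = [gep of] @resource_global
    //   %r = load %p
    //   %h = call @dx.op.createHandleForLib(..., %r)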
hlsl::OP *hlslOP = DM.GetOP(); for (Function &F : DM.GetModule()->functions()) { if (hlslOP->IsDxilOpFunc(&F)) { hlsl::OP::OpCodeClass opClass; if (hlslOP->GetOpCodeClass(&F, opClass) && opClass == DXIL::OpCodeClass::CreateHandleForLib) { for (auto U : F.users()) { CallInst *CI = cast<CallInst>(U); if (m_Errors.ErrorUsers.count(CI)) continue; // Error already reported DxilInst_CreateHandleForLib createHandle(CI); Value *res = createHandle.get_Resource(); LoadInst *LI = dyn_cast<LoadInst>(res); if (LI) { Value *Ptr = LI->getPointerOperand(); if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) Ptr = GEP->getPointerOperand(); if (isa<GlobalVariable>(Ptr)) continue; } // handle wasn't processed // Right now, the most likely cause is user call with resources, but // this should be updated if there are other reasons for this to // happen. m_Errors.ReportError(ResourceUseErrors::UserCallsWithResources, U); } } } } } // Fix resource global variable properties to external constant bool SetExternalConstant(GlobalVariable *GV) { if (GV->hasInitializer() || !GV->isConstant() || GV->getLinkage() != GlobalVariable::LinkageTypes::ExternalLinkage) { GV->setInitializer(nullptr); GV->setConstant(true); GV->setLinkage(GlobalVariable::LinkageTypes::ExternalLinkage); return true; } return false; } bool CollectResources(DxilModule &DM) { bool bChanged = false; for (const auto &res : DM.GetCBuffers()) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(res->GetGlobalSymbol())) { bChanged |= SetExternalConstant(GV); CollectResourceGVUsers(GV, GV); } } for (const auto &res : DM.GetSRVs()) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(res->GetGlobalSymbol())) { bChanged |= SetExternalConstant(GV); CollectResourceGVUsers(GV, GV); } } for (const auto &res : DM.GetUAVs()) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(res->GetGlobalSymbol())) { bChanged |= SetExternalConstant(GV); CollectResourceGVUsers(GV, GV); } } for (const auto &res : DM.GetSamplers()) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(res->GetGlobalSymbol())) { bChanged |= SetExternalConstant(GV); CollectResourceGVUsers(GV, GV); } } return bChanged; } void DoTransform(hlsl::OP *hlslOP) { RemoveConflicts(); CreateSelects(); CreateIndexAllocas(); ReplaceAllocaGEPs(); ReplaceAllocaLoads(); ReplaceGVGEPs(); ReplaceStores(); UpdateSelects(); SimplifyMerges(); ReplaceHandles(); if (!m_Errors.ErrorsReported()) CleanupDeadInsts(); } bool ErrorsReported() { return m_Errors.ErrorsReported(); } bool runOnModule(llvm::Module &M) { DxilModule &DM = M.GetOrCreateDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); bool bChanged = CollectResources(DM); // If no selects or allocas are involved, there isn't anything to do if (Selects.empty() && Allocas.empty()) return bChanged; DoTransform(hlslOP); VerifyComplete(DM); return true; } }; class DxilLegalizeResources : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilLegalizeResources() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Legalize Resource Use"; } bool runOnModule(Module &M) override { LegalizeResourceUseHelper helper; return helper.runOnModule(M); } private: }; } // namespace char DxilLegalizeResources::ID = 0; ModulePass *llvm::createDxilLegalizeResources() { return new DxilLegalizeResources(); } INITIALIZE_PASS(DxilLegalizeResources, "hlsl-dxil-legalize-resources", "DXIL legalize resource use", false, false) bool DxilLowerCreateHandleForLib::RemovePhiOnResource() { LegalizeResourceUseHelper helper; bool bChanged = 
      helper.runOnModule(*m_DM->GetModule());
  if (helper.ErrorsReported())
    m_bLegalizationFailed = true;
  return bChanged;
}

// LegacyLayout.
namespace {

StructType *UpdateStructTypeForLegacyLayout(StructType *ST,
                                            DxilTypeSystem &TypeSys, Module &M,
                                            bool includeTopLevelResource = false);

Type *UpdateFieldTypeForLegacyLayout(Type *Ty, DxilFieldAnnotation &annotation,
                                     DxilTypeSystem &TypeSys, Module &M) {
  DXASSERT(!Ty->isPointerTy(), "struct field should not be a pointer");

  if (Ty->isArrayTy()) {
    Type *EltTy = Ty->getArrayElementType();
    Type *UpdatedTy =
        UpdateFieldTypeForLegacyLayout(EltTy, annotation, TypeSys, M);
    if (EltTy == UpdatedTy)
      return Ty;
    else if (UpdatedTy)
      return ArrayType::get(UpdatedTy, Ty->getArrayNumElements());
    else
      return nullptr;
  } else if (hlsl::HLMatrixType::isa(Ty)) {
    DXASSERT(annotation.HasMatrixAnnotation(), "must be a matrix");
    HLMatrixType MatTy = HLMatrixType::cast(Ty);
    unsigned rows = MatTy.getNumRows();
    unsigned cols = MatTy.getNumColumns();
    Type *EltTy = MatTy.getElementTypeForReg();

    // Get cols and rows from annotation.
    const DxilMatrixAnnotation &matrix = annotation.GetMatrixAnnotation();
    if (matrix.Orientation == MatrixOrientation::RowMajor) {
      rows = matrix.Rows;
      cols = matrix.Cols;
    } else {
      DXASSERT_NOMSG(matrix.Orientation == MatrixOrientation::ColumnMajor);
      cols = matrix.Rows;
      rows = matrix.Cols;
    }

    EltTy = UpdateFieldTypeForLegacyLayout(EltTy, annotation, TypeSys, M);
    Type *rowTy = VectorType::get(EltTy, cols);

    // Matrix should be aligned like array if rows > 1,
    // otherwise, it's just like a vector.
    if (rows > 1)
      return ArrayType::get(rowTy, rows);
    else
      return rowTy;
  } else if (StructType *ST = dyn_cast<StructType>(Ty)) {
    return UpdateStructTypeForLegacyLayout(ST, TypeSys, M);
  } else if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) {
    Type *EltTy = VT->getElementType();
    Type *UpdatedTy =
        UpdateFieldTypeForLegacyLayout(EltTy, annotation, TypeSys, M);
    if (EltTy == UpdatedTy)
      return Ty;
    else
      return VectorType::get(UpdatedTy, VT->getNumElements());
  } else {
    Type *i32Ty = Type::getInt32Ty(Ty->getContext());
    // Basic types.
    if (Ty->isHalfTy()) {
      return Type::getFloatTy(Ty->getContext());
    } else if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
      if (ITy->getBitWidth() < 32)
        return i32Ty;
      else
        return Ty;
    } else
      return Ty;
  }
}

StructType *UpdateStructTypeForLegacyLayout(StructType *ST,
                                            DxilTypeSystem &TypeSys, Module &M,
                                            bool includeTopLevelResource) {
  bool bUpdated = false;
  unsigned fieldsCount = ST->getNumElements();
  std::vector<Type *> fieldTypes;
  fieldTypes.reserve(fieldsCount);
  DxilStructAnnotation *SA = TypeSys.GetStructAnnotation(ST);

  if (!includeTopLevelResource && dxilutil::IsHLSLResourceType(ST))
    return nullptr;

  // After reflection is stripped from library, this will be null if no update
  // is required.
  if (!SA) {
    return ST;
  }

  if (SA->IsEmptyStruct()) {
    return ST;
  }

  // Resource fields must be deleted, since they don't actually
  // show up in the structure layout.
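  //
  // Illustrative sketch only (hypothetical HLSL, not from this source):
  //   struct Foo { half2 h; min16int i; Texture2D t; };
  // would become, under legacy layout:
  //   struct hostlayout.Foo { <2 x float> h; i32 i; };   // t is dropped
  // with fieldMap = {0, 1} mapping surviving fields to their old annotations.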
// fieldMap maps from new field index to old field index for porting // annotations std::vector<unsigned> fieldMap; fieldMap.reserve(fieldsCount); for (unsigned i = 0; i < fieldsCount; i++) { Type *EltTy = ST->getElementType(i); Type *UpdatedTy = UpdateFieldTypeForLegacyLayout( EltTy, SA->GetFieldAnnotation(i), TypeSys, M); if (UpdatedTy != nullptr) { fieldMap.push_back(i); fieldTypes.push_back(UpdatedTy); } if (EltTy != UpdatedTy) bUpdated = true; } if (!bUpdated) { return ST; } else { std::string legacyName = std::string(DXIL::kHostLayoutTypePrefix) + ST->getName().str(); if (StructType *legacyST = M.getTypeByName(legacyName)) return legacyST; StructType *NewST = StructType::create(ST->getContext(), fieldTypes, legacyName); // Only add annotation if struct is not empty. if (NewST->getNumElements() > 0) { DxilStructAnnotation *NewSA = TypeSys.AddStructAnnotation(NewST); // Clone annotation. NewSA->SetCBufferSize(SA->GetCBufferSize()); NewSA->SetNumTemplateArgs(SA->GetNumTemplateArgs()); for (unsigned i = 0; i < SA->GetNumTemplateArgs(); i++) { NewSA->GetTemplateArgAnnotation(i) = SA->GetTemplateArgAnnotation(i); } // Remap with deleted resource fields for (unsigned i = 0; i < NewSA->GetNumFields(); i++) { NewSA->GetFieldAnnotation(i) = SA->GetFieldAnnotation(fieldMap[i]); } TypeSys.FinishStructAnnotation(*NewSA); } return NewST; } } bool UpdateStructTypeForLegacyLayout(DxilResourceBase &Res, DxilTypeSystem &TypeSys, DxilModule &DM) { Module &M = *DM.GetModule(); Constant *Symbol = Res.GetGlobalSymbol(); Type *ElemTy = Res.GetHLSLType()->getPointerElementType(); // Support Array of ConstantBuffer/StructuredBuffer. llvm::SmallVector<unsigned, 4> arrayDims; ElemTy = dxilutil::StripArrayTypes(ElemTy, &arrayDims); StructType *ST = cast<StructType>(ElemTy); if (ST->isOpaque()) { DXASSERT(Res.GetClass() == DxilResourceBase::Class::CBuffer, "Only cbuffer can have opaque struct."); return false; } Type *UpdatedST = UpdateStructTypeForLegacyLayout( ST, TypeSys, M, Res.GetKind() == DXIL::ResourceKind::StructuredBuffer); if (ST != UpdatedST) { // Support Array of ConstantBuffer/StructuredBuffer. Type *UpdatedTy = dxilutil::WrapInArrayTypes(UpdatedST, arrayDims); GlobalVariable *NewGV = cast<GlobalVariable>( M.getOrInsertGlobal(Symbol->getName().str() + "_legacy", UpdatedTy)); Res.SetGlobalSymbol(NewGV); Res.SetHLSLType(NewGV->getType()); OP *hlslOP = DM.GetOP(); if (DM.GetShaderModel()->IsLib()) { TypeSys.EraseStructAnnotation(ST); // If it's a library, we need to replace the GV which involves a few // replacements Function *NF = hlslOP->GetOpFunc(hlsl::OP::OpCode::CreateHandleForLib, UpdatedST); Value *opArg = hlslOP->GetI32Const((unsigned)hlsl::OP::OpCode::CreateHandleForLib); auto replaceResLd = [&NF, &opArg](LoadInst *ldInst, Value *NewPtr) { if (!ldInst->user_empty()) { IRBuilder<> Builder = IRBuilder<>(ldInst); LoadInst *newLoad = Builder.CreateLoad(NewPtr); Value *args[] = {opArg, newLoad}; for (auto user = ldInst->user_begin(), E = ldInst->user_end(); user != E;) { CallInst *CI = cast<CallInst>(*(user++)); CallInst *newCI = CallInst::Create(NF, args, "", CI); CI->replaceAllUsesWith(newCI); CI->eraseFromParent(); } } ldInst->eraseFromParent(); }; // Merge GEP to simplify replace old GV. if (!arrayDims.empty()) dxilutil::MergeGepUse(Symbol); // Replace old GV. 
      for (auto UserIt = Symbol->user_begin(), userEnd = Symbol->user_end();
           UserIt != userEnd;) {
        Value *User = *(UserIt++);

        if (LoadInst *ldInst = dyn_cast<LoadInst>(User)) {
          replaceResLd(ldInst, NewGV);
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(User)) {
          IRBuilder<> Builder(GEP->getContext());
          StringRef Name = "";
          if (Instruction *I = dyn_cast<Instruction>(GEP)) {
            Builder.SetInsertPoint(I);
            Name = GEP->getName();
          }
          SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
          Value *NewPtr = Builder.CreateGEP(NewGV, Indices);
          for (auto GEPUserIt = GEP->user_begin(), GEPuserEnd = GEP->user_end();
               GEPUserIt != GEPuserEnd;) {
            Value *User = *(GEPUserIt++);
            if (LoadInst *ldInst = dyn_cast<LoadInst>(User)) {
              replaceResLd(ldInst, NewPtr);
            } else {
              User->dump();
              DXASSERT(0, "unsupported user when updating resource type");
            }
          }
          if (Instruction *I = dyn_cast<Instruction>(GEP))
            I->eraseFromParent();
        } else {
          User->dump();
          DXASSERT(0, "unsupported user when updating resource type");
        }
      }
    } else {
      // If not a library, the GV should be deleted
      for (auto UserIt = Symbol->user_begin(); UserIt != Symbol->user_end();) {
        Value *User = *(UserIt++);

        if (Instruction *I = dyn_cast<Instruction>(User)) {
          if (!User->user_empty())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        } else {
          ConstantExpr *CE = cast<ConstantExpr>(User);
          if (!CE->user_empty())
            CE->replaceAllUsesWith(UndefValue::get(CE->getType()));
        }
      }
    }
    Symbol->removeDeadConstantUsers();

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Symbol))
      GV->eraseFromParent();
    return true;
  }
  return false;
}

bool UpdateStructTypeForLegacyLayoutOnDM(DxilModule &DM) {
  DxilTypeSystem &TypeSys = DM.GetTypeSystem();
  bool bChanged = false;
  for (auto &CBuf : DM.GetCBuffers()) {
    bChanged |= UpdateStructTypeForLegacyLayout(*CBuf.get(), TypeSys, DM);
  }

  for (auto &UAV : DM.GetUAVs()) {
    if (DXIL::IsStructuredBuffer(UAV->GetKind()))
      bChanged |= UpdateStructTypeForLegacyLayout(*UAV.get(), TypeSys, DM);
  }

  for (auto &SRV : DM.GetSRVs()) {
    if (SRV->IsStructuredBuffer() || SRV->IsTBuffer())
      bChanged |= UpdateStructTypeForLegacyLayout(*SRV.get(), TypeSys, DM);
  }
  return bChanged;
}

} // namespace

void DxilLowerCreateHandleForLib::FailOnPoisonResources() {
  // A previous pass replaced all undef resources with constant zero resources.
  // If those made it here, the program is malformed.
  for (Function &Func : this->m_DM->GetModule()->functions()) {
    hlsl::OP::OpCodeClass OpcodeClass;
    if (m_DM->GetOP()->GetOpCodeClass(&Func, OpcodeClass) &&
        OpcodeClass == OP::OpCodeClass::CreateHandleForLib) {
      Type *ResTy = Func.getFunctionType()->getParamType(
          DXIL::OperandIndex::kCreateHandleForLibResOpIdx);
      Constant *PoisonRes = ConstantAggregateZero::get(ResTy);
      for (User *PoisonUser : PoisonRes->users())
        if (Instruction *PoisonUserInst = dyn_cast<Instruction>(PoisonUser))
          dxilutil::EmitResMappingError(PoisonUserInst);
    }
  }
}

bool DxilLowerCreateHandleForLib::UpdateStructTypeForLegacyLayout() {
  return UpdateStructTypeForLegacyLayoutOnDM(*m_DM);
}

// Change ResourceSymbol to undef if it is no longer needed.
void DxilLowerCreateHandleForLib::UpdateResourceSymbols() { auto UpdateResourceSymbol = [](DxilResourceBase *res) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(res->GetGlobalSymbol())) { GV->removeDeadConstantUsers(); DXASSERT(GV->user_empty(), "else resource not lowered"); res->SetGlobalSymbol(UndefValue::get(GV->getType())); if (GV->user_empty()) GV->eraseFromParent(); } }; for (auto &&C : m_DM->GetCBuffers()) { UpdateResourceSymbol(C.get()); } for (auto &&Srv : m_DM->GetSRVs()) { UpdateResourceSymbol(Srv.get()); } for (auto &&Uav : m_DM->GetUAVs()) { UpdateResourceSymbol(Uav.get()); } for (auto &&S : m_DM->GetSamplers()) { UpdateResourceSymbol(S.get()); } } // Lower createHandleForLib namespace { Value *flattenGepIdx(GEPOperator *GEP) { Value *idx = nullptr; if (GEP->getNumIndices() == 2) { // one dim array of resource idx = (GEP->idx_begin() + 1)->get(); } else { gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); // Must be instruction for multi dim array. std::unique_ptr<IRBuilder<>> Builder; if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP)) { Builder = llvm::make_unique<IRBuilder<>>(GEPInst); } else { Builder = llvm::make_unique<IRBuilder<>>(GEP->getContext()); } for (; GEPIt != E; ++GEPIt) { if (GEPIt->isArrayTy()) { unsigned arraySize = GEPIt->getArrayNumElements(); Value *tmpIdx = GEPIt.getOperand(); if (idx == nullptr) idx = tmpIdx; else { idx = Builder->CreateMul(idx, Builder->getInt32(arraySize)); idx = Builder->CreateAdd(idx, tmpIdx); } } } } return idx; } } // namespace void DxilLowerCreateHandleForLib::ReplaceResourceUserWithHandle( DxilResource &res, LoadInst *load, Instruction *handle) { for (auto resUser = load->user_begin(), E = load->user_end(); resUser != E;) { Value *V = *(resUser++); CallInst *CI = dyn_cast<CallInst>(V); DxilInst_CreateHandleForLib createHandle(CI); DXASSERT(createHandle, "must be createHandle"); CI->replaceAllUsesWith(handle); CI->eraseFromParent(); } if (res.GetClass() == DXIL::ResourceClass::UAV) { // Before this pass, the global resources might not have been mapped with // all the uses. Now we're 100% sure who uses what resources (otherwise the // compilation would have failed), so we do a round on marking UAV's as // having counter. 
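    // Schematic of the pattern matched below (illustration, not quoted from
    // this source):
    //   %h  = call @dx.op.createHandle(...)          ; handle for this UAV
    //   %ah = call @dx.op.annotateHandle(%h, ...)    ; optional, SM 6.6 path
    //   call @dx.op.bufferUpdateCounter(%ah, ...)    ; => SetHasCounter(true)
    // A bufferUpdateCounter directly on %h is recognized as well.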
    static auto IsDxilOp = [](Value *V, hlsl::OP::OpCode Op) -> bool {
      Instruction *I = dyn_cast<Instruction>(V);
      if (!I)
        return false;
      return hlsl::OP::IsDxilOpFuncCallInst(I, Op);
    };

    // Search all users for an update-counter use
    bool updateAnnotateHandle = res.IsGloballyCoherent();
    if (!res.HasCounter()) {
      for (User *U : handle->users()) {
        if (IsDxilOp(U, hlsl::OP::OpCode::BufferUpdateCounter)) {
          res.SetHasCounter(true);
          break;
        } else if (IsDxilOp(U, hlsl::OP::OpCode::AnnotateHandle)) {
          for (User *UU : U->users()) {
            if (IsDxilOp(UU, hlsl::OP::OpCode::BufferUpdateCounter)) {
              res.SetHasCounter(true);
              updateAnnotateHandle = true;
              break;
            }
          }
          if (updateAnnotateHandle)
            break;
        }
      }
    }
    if (updateAnnotateHandle) {
      // Update resource props with counter flag
      DxilResourceProperties RP =
          resource_helper::loadPropsFromResourceBase(&res);
      // Require ShaderModel to reconstruct resource property constant
      const ShaderModel *pSM = m_DM->GetShaderModel();

      SmallVector<Instruction *, 4> annotHandles;
      for (User *U : handle->users()) {
        DxilInst_AnnotateHandle annotateHandle(cast<Instruction>(U));
        if (annotateHandle) {
          annotHandles.emplace_back(cast<Instruction>(U));
        }
      }
      if (!annotHandles.empty()) {
        Instruction *firstAnnot = annotHandles.pop_back_val();
        DxilInst_AnnotateHandle annotateHandle(firstAnnot);
        // Update props.
        Constant *propsConst = resource_helper::getAsConstant(
            RP, annotateHandle.get_props()->getType(), *pSM);
        annotateHandle.set_props(propsConst);
        if (!annotHandles.empty()) {
          // Move firstAnnot after handle.
          firstAnnot->removeFromParent();
          firstAnnot->insertAfter(handle);
          // Remove redundant annotate handles.
          for (auto *annotHdl : annotHandles) {
            annotHdl->replaceAllUsesWith(firstAnnot);
            annotHdl->eraseFromParent();
          }
        }
      }
    }
  }

  load->eraseFromParent();
}

void DxilLowerCreateHandleForLib::TranslateDxilResourceUses(
    DxilResourceBase &res) {
  OP *hlslOP = m_DM->GetOP();
  // Generate createHandleFromBinding for SM 6.6 and later.
  bool bCreateFromBinding = m_DM->GetShaderModel()->IsSM66Plus();
  OP::OpCode createOp = bCreateFromBinding ? OP::OpCode::CreateHandleFromBinding
                                           : OP::OpCode::CreateHandle;
  Function *createHandle =
      hlslOP->GetOpFunc(createOp, llvm::Type::getVoidTy(m_DM->GetCtx()));
  Value *opArg = hlslOP->GetU32Const((unsigned)createOp);
  bool isViewResource = res.GetClass() == DXIL::ResourceClass::SRV ||
                        res.GetClass() == DXIL::ResourceClass::UAV;
  bool isROV = isViewResource && static_cast<DxilResource &>(res).IsROV();
  std::string handleName =
      (res.GetGlobalName() + Twine("_") + Twine(res.GetResClassName())).str();
  if (isViewResource)
    handleName += (Twine("_") + Twine(res.GetResDimName())).str();
  if (isROV)
    handleName += "_ROV";
  Value *resClassArg = hlslOP->GetU8Const(
      static_cast<std::underlying_type<DxilResourceBase::Class>::type>(
          res.GetClass()));
  Value *resIDArg = hlslOP->GetU32Const(res.GetID());
  // resLowerBound will be added after allocation in DxilCondenseResources.
  Value *resLowerBound = hlslOP->GetU32Const(res.GetLowerBound());

  Value *isUniformRes = hlslOP->GetI1Const(0);

  Value *GV = res.GetGlobalSymbol();
  DXASSERT(isa<GlobalValue>(GV),
           "DxilLowerCreateHandleForLib cannot deal with unused resources.");

  Module *pM = m_DM->GetModule();
  // TODO: add debug info to create handle.
  DIVariable *DIV = nullptr;
  DILocation *DL = nullptr;
  if (m_HasDbgInfo) {
    DebugInfoFinder &Finder = m_DM->GetOrCreateDebugInfoFinder();
    DIV =
        dxilutil::FindGlobalVariableDebugInfo(cast<GlobalVariable>(GV), Finder);
    if (DIV)
      // TODO: how to get col?
DL = DILocation::get(pM->getContext(), DIV->getLine(), 1, DIV->getScope()); } bool isResArray = res.GetRangeSize() > 1; std::unordered_map<Function *, Instruction *> handleMapOnFunction; Value *createHandleArgs[] = {opArg, resClassArg, resIDArg, resLowerBound, isUniformRes}; DxilResourceBinding binding = resource_helper::loadBindingFromResourceBase(&res); Value *bindingV = resource_helper::getAsConstant( binding, hlslOP->GetResourceBindingType(), *m_DM->GetShaderModel()); Value *createHandleFromBindingArgs[] = {opArg, bindingV, resLowerBound, isUniformRes}; MutableArrayRef<Value *> Args(bCreateFromBinding ? createHandleFromBindingArgs : createHandleArgs, bCreateFromBinding ? 4 : 5); const unsigned resIdxOpIdx = bCreateFromBinding ? DxilInst_CreateHandleFromBinding::arg_index : DxilInst_CreateHandle::arg_index; const unsigned nonUniformOpIdx = bCreateFromBinding ? DxilInst_CreateHandleFromBinding::arg_nonUniformIndex : DxilInst_CreateHandle::arg_nonUniformIndex; for (iplist<Function>::iterator F : pM->getFunctionList()) { if (!F->isDeclaration()) { if (!isResArray) { IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(F)); if (m_HasDbgInfo) { // TODO: set debug info. // Builder.SetCurrentDebugLocation(DL); } handleMapOnFunction[F] = Builder.CreateCall(createHandle, Args, handleName); } } } for (auto U = GV->user_begin(), E = GV->user_end(); U != E;) { User *user = *(U++); // Skip unused user. if (user->user_empty()) continue; if (LoadInst *ldInst = dyn_cast<LoadInst>(user)) { Function *userF = ldInst->getParent()->getParent(); DXASSERT(handleMapOnFunction.count(userF), "must exist"); Instruction *handle = handleMapOnFunction[userF]; ReplaceResourceUserWithHandle(static_cast<DxilResource &>(res), ldInst, handle); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(user)) { Value *idx = flattenGepIdx(GEP); Args[resIdxOpIdx] = idx; Args[nonUniformOpIdx] = isUniformRes; Instruction *handle = nullptr; if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP)) { IRBuilder<> Builder = IRBuilder<>(GEPInst); if (DxilMDHelper::IsMarkedNonUniform(GEPInst)) { // Mark nonUniform. Args[nonUniformOpIdx] = hlslOP->GetI1Const(1); // Clear nonUniform on GEP. GEPInst->setMetadata(DxilMDHelper::kDxilNonUniformAttributeMDName, nullptr); } Args[resIdxOpIdx] = Builder.CreateAdd(idx, resLowerBound); handle = Builder.CreateCall(createHandle, Args, handleName); } for (auto GEPU = GEP->user_begin(), GEPE = GEP->user_end(); GEPU != GEPE;) { // Must be load inst. LoadInst *ldInst = cast<LoadInst>(*(GEPU++)); if (handle) { ReplaceResourceUserWithHandle(static_cast<DxilResource &>(res), ldInst, handle); } else { IRBuilder<> Builder = IRBuilder<>(ldInst); Args[resIdxOpIdx] = Builder.CreateAdd(idx, resLowerBound); Instruction *localHandle = Builder.CreateCall(createHandle, Args, handleName); ReplaceResourceUserWithHandle(static_cast<DxilResource &>(res), ldInst, localHandle); } } if (Instruction *I = dyn_cast<Instruction>(GEP)) { I->eraseFromParent(); } } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(user)) { DXASSERT(onlyUsedByLifetimeMarkers(BCI), "expected bitcast to only be used by lifetime intrinsics"); for (auto BCIU = BCI->user_begin(), BCIE = BCI->user_end(); BCIU != BCIE;) { IntrinsicInst *II = cast<IntrinsicInst>(*(BCIU++)); II->eraseFromParent(); } BCI->eraseFromParent(); } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(user)) { // A GEPOperator can also be a ConstantExpr, so it must be checked before // this code. 
DXASSERT(CE->getOpcode() == Instruction::BitCast, "expected bitcast"); DXASSERT(onlyUsedByLifetimeMarkers(CE), "expected ConstantExpr to only be used by lifetime intrinsics"); for (auto CEU = CE->user_begin(), CEE = CE->user_end(); CEU != CEE;) { IntrinsicInst *II = cast<IntrinsicInst>(*(CEU++)); II->eraseFromParent(); } } else { DXASSERT(false, "AddOpcodeParamForIntrinsic in CodeGen did not patch uses " "to only have ld/st refer to temp object"); } } // Erase unused handle. for (auto It : handleMapOnFunction) { Instruction *I = It.second; if (I->user_empty()) I->eraseFromParent(); } } void DxilLowerCreateHandleForLib::GenerateDxilResourceHandles() { for (size_t i = 0; i < m_DM->GetCBuffers().size(); i++) { DxilCBuffer &C = m_DM->GetCBuffer(i); TranslateDxilResourceUses(C); } // Create sampler handle first, may be used by SRV operations. for (size_t i = 0; i < m_DM->GetSamplers().size(); i++) { DxilSampler &S = m_DM->GetSampler(i); TranslateDxilResourceUses(S); } for (size_t i = 0; i < m_DM->GetSRVs().size(); i++) { DxilResource &SRV = m_DM->GetSRV(i); TranslateDxilResourceUses(SRV); } for (size_t i = 0; i < m_DM->GetUAVs().size(); i++) { DxilResource &UAV = m_DM->GetUAV(i); TranslateDxilResourceUses(UAV); } } // TBuffer. namespace { void InitTBuffer(const DxilCBuffer *pSource, DxilResource *pDest) { pDest->SetKind(pSource->GetKind()); pDest->SetCompType(DXIL::ComponentType::U32); pDest->SetSampleCount(0); pDest->SetElementStride(0); pDest->SetGloballyCoherent(false); pDest->SetHasCounter(false); pDest->SetRW(false); pDest->SetROV(false); pDest->SetID(pSource->GetID()); pDest->SetSpaceID(pSource->GetSpaceID()); pDest->SetLowerBound(pSource->GetLowerBound()); pDest->SetRangeSize(pSource->GetRangeSize()); pDest->SetGlobalSymbol(pSource->GetGlobalSymbol()); pDest->SetGlobalName(pSource->GetGlobalName()); pDest->SetHandle(pSource->GetHandle()); pDest->SetHLSLType(pSource->GetHLSLType()); } void PatchTBufferLoad(CallInst *handle, DxilModule &DM, DenseSet<Value *> &patchedSet) { if (patchedSet.count(handle)) return; patchedSet.insert(handle); hlsl::OP *hlslOP = DM.GetOP(); llvm::LLVMContext &Ctx = DM.GetCtx(); Type *doubleTy = Type::getDoubleTy(Ctx); Type *i64Ty = Type::getInt64Ty(Ctx); // Replace corresponding cbuffer loads with typed buffer loads for (auto U = handle->user_begin(); U != handle->user_end();) { User *user = *(U++); CallInst *I = dyn_cast<CallInst>(user); // Could also be store for out arg in lib. DXASSERT(isa<StoreInst>(user) || (I && OP::IsDxilOpFuncCallInst(I)), "otherwise unexpected user of CreateHandle value"); if (!I) continue; DXIL::OpCode opcode = OP::GetDxilOpFuncCallInst(I); if (opcode == DXIL::OpCode::CBufferLoadLegacy) { DxilInst_CBufferLoadLegacy cbLoad(I); StructType *cbRetTy = cast<StructType>(I->getType()); // elements will be 4, or 8 for native 16-bit types, which require special // handling. 
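      // Schematic of the rewrite (illustration, not quoted from this source):
      // a 16-byte row load
      //   %row = call %dx.types.CBufRet.f32 @dx.op.cbufferLoadLegacy(...)
      // becomes a typed buffer load that always yields 4 x 32-bit values:
      //   %row = call %dx.types.ResRet.i32 @dx.op.bufferLoad(...)
      // so 64-bit and 16-bit components must be reassembled from i32 pieces
      // in the extractvalue rewriting below.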
bool cbRet8Elt = cbRetTy->getNumElements() > 4; // Replace with appropriate buffer load instruction IRBuilder<> Builder(I); opcode = OP::OpCode::BufferLoad; Type *Ty = Type::getInt32Ty(Ctx); Function *BufLoad = hlslOP->GetOpFunc(opcode, Ty); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *undefI = UndefValue::get(Type::getInt32Ty(Ctx)); Value *offset = cbLoad.get_regIndex(); CallInst *load = Builder.CreateCall(BufLoad, {opArg, handle, offset, undefI}); // Find extractelement uses of cbuffer load and replace + generate bitcast // as necessary for (auto LU = I->user_begin(); LU != I->user_end();) { ExtractValueInst *evInst = dyn_cast<ExtractValueInst>(*(LU++)); DXASSERT(evInst && evInst->getNumIndices() == 1, "user of cbuffer load result should be extractvalue"); uint64_t idx = evInst->getIndices()[0]; Type *EltTy = evInst->getType(); IRBuilder<> EEBuilder(evInst); Value *result = nullptr; if (EltTy != Ty) { // extract two values and DXIL::OpCode::MakeDouble or construct i64 if ((EltTy == doubleTy) || (EltTy == i64Ty)) { DXASSERT(idx < 2, "64-bit component index out of range"); // This assumes big endian order in tbuffer elements (is this // correct?) Value *low = EEBuilder.CreateExtractValue(load, idx * 2); Value *high = EEBuilder.CreateExtractValue(load, idx * 2 + 1); if (EltTy == doubleTy) { opcode = OP::OpCode::MakeDouble; Function *MakeDouble = hlslOP->GetOpFunc(opcode, doubleTy); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); result = EEBuilder.CreateCall(MakeDouble, {opArg, low, high}); } else { high = EEBuilder.CreateZExt(high, i64Ty); low = EEBuilder.CreateZExt(low, i64Ty); high = EEBuilder.CreateShl(high, hlslOP->GetU64Const(32)); result = EEBuilder.CreateOr(high, low); } } else { if (cbRet8Elt) { DXASSERT_NOMSG(cbRetTy->getNumElements() == 8); DXASSERT_NOMSG(EltTy->getScalarSizeInBits() == 16); // Translate extract from 16bit x 8 to extract and translate from // i32 by 4 result = EEBuilder.CreateExtractValue(load, idx >> 1); if (idx & 1) result = EEBuilder.CreateLShr(result, 16); result = EEBuilder.CreateTrunc(result, Type::getInt16Ty(Ctx)); if (EltTy->isHalfTy()) result = EEBuilder.CreateBitCast(result, EltTy); } else { result = EEBuilder.CreateExtractValue(load, idx); if (Ty->getScalarSizeInBits() > EltTy->getScalarSizeInBits()) { if (EltTy->isIntegerTy()) { result = EEBuilder.CreateTrunc(result, EltTy); } else { result = EEBuilder.CreateBitCast(result, Type::getFloatTy(Ctx)); result = EEBuilder.CreateFPTrunc(result, EltTy); } } else { result = EEBuilder.CreateBitCast(result, EltTy); } } } } else { result = EEBuilder.CreateExtractValue(load, idx); } evInst->replaceAllUsesWith(result); evInst->eraseFromParent(); } } else if (opcode == DXIL::OpCode::CBufferLoad) { // TODO: Handle this, or prevent this for tbuffer DXASSERT(false, "otherwise CBufferLoad used for tbuffer rather than " "CBufferLoadLegacy"); } else if (opcode == DXIL::OpCode::AnnotateHandle) { PatchTBufferLoad(cast<CallInst>(I), DM, patchedSet); continue; } else if (opcode == DXIL::OpCode::BufferLoad) { // Already translated, skip. continue; } else { DXASSERT(false, "otherwise unexpected user of CreateHandle value"); } I->eraseFromParent(); } } } // namespace void DxilLowerCreateHandleForLib::PatchTBufferUse( Value *V, DxilModule &DM, DenseSet<Value *> &patchedSet) { for (User *U : V->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { // Patch dxil call. 
      if (hlsl::OP::IsDxilOpFuncCallInst(CI))
        PatchTBufferLoad(CI, DM, patchedSet);
    } else {
      PatchTBufferUse(U, DM, patchedSet);
    }
  }
}

bool DxilLowerCreateHandleForLib::PatchDynamicTBuffers(DxilModule &DM) {
  hlsl::OP *hlslOP = DM.GetOP();
  Function *AnnotHandleFn = hlslOP->GetOpFunc(DXIL::OpCode::AnnotateHandle,
                                              Type::getVoidTy(DM.GetCtx()));
  if (AnnotHandleFn->user_empty()) {
    AnnotHandleFn->eraseFromParent();
    return false;
  }
  bool bUpdated = false;
  for (User *U : AnnotHandleFn->users()) {
    CallInst *CI = cast<CallInst>(U);
    DxilInst_AnnotateHandle annot(CI);
    DxilResourceProperties RP = resource_helper::loadPropsFromAnnotateHandle(
        annot, *DM.GetShaderModel());
    if (RP.getResourceKind() != DXIL::ResourceKind::TBuffer)
      continue;
    // Skip handles from createHandleForLib; those are taken care of in
    // PatchTBuffers.
    if (CallInst *HdlCI = dyn_cast<CallInst>(annot.get_res())) {
      if (hlslOP->IsDxilOpFuncCallInst(HdlCI)) {
        if (hlslOP->GetDxilOpFuncCallInst(HdlCI) ==
            DXIL::OpCode::CreateHandleForLib)
          continue;
      }
    }
    DenseSet<Value *> patchedSet;
    PatchTBufferLoad(CI, DM, patchedSet);
    bUpdated = true;
  }
  return bUpdated;
}

bool DxilLowerCreateHandleForLib::PatchTBuffers(DxilModule &DM) {
  bool bChanged = false;
  // move tbuffer resources to SRVs
  Module &M = *DM.GetModule();
  const ShaderModel &SM = *DM.GetShaderModel();
  DenseSet<Value *> patchedSet;

  // First, patch users of AnnotateHandle calls if we have them.
  // This will pick up uses in lib_6_x functions that otherwise
  // would be missed.
  if (SM.IsSM66Plus()) {
    for (auto it : DM.GetOP()->GetOpFuncList(DXIL::OpCode::AnnotateHandle)) {
      Function *F = it.second;
      for (auto U = F->user_begin(); U != F->user_end();) {
        User *user = *(U++);
        if (CallInst *CI = dyn_cast<CallInst>(user)) {
          DxilInst_AnnotateHandle AH(CI);
          if (AH) {
            DxilResourceProperties RP =
                resource_helper::loadPropsFromAnnotateHandle(AH, SM);
            if (RP.getResourceKind() == DXIL::ResourceKind::TBuffer)
              PatchTBufferLoad(CI, DM, patchedSet);
          }
        }
      }
    }
  }

  unsigned offset = DM.GetSRVs().size();
  for (auto it = DM.GetCBuffers().begin(); it != DM.GetCBuffers().end(); it++) {
    DxilCBuffer *CB = it->get();
    if (CB->GetKind() == DXIL::ResourceKind::TBuffer) {
      auto srv = make_unique<DxilResource>();
      InitTBuffer(CB, srv.get());
      srv->SetID(offset++);
      DM.AddSRV(std::move(srv));
      GlobalVariable *GV = dyn_cast<GlobalVariable>(CB->GetGlobalSymbol());
      if (GV == nullptr)
        continue;
      PatchTBufferUse(GV, DM, patchedSet);
      // Set global symbol for cbuffer to an unused value so it can be removed
      // in RemoveUnusedResourceSymbols.
      Type *Ty = GV->getType()->getElementType();
      GlobalVariable *NewGV = new GlobalVariable(
          M, Ty, GV->isConstant(), GV->getLinkage(), /*Initializer*/ nullptr,
          GV->getName(), /*InsertBefore*/ nullptr, GV->getThreadLocalMode(),
          GV->getType()->getAddressSpace(), GV->isExternallyInitialized());
      CB->SetGlobalSymbol(NewGV);
      bChanged = true;
    }
  }
  return bChanged;
}

typedef DenseMap<Value *, unsigned> OffsetForValueMap;

// Find the imm offset part from a value.
// It must exist unless offset is 0.
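// Examples of the immediate part recovered below (hypothetical values):
//   add i32 %dyn, 12        -> 12
//   or  i32 %aligned, 2     -> 2
//   select/phi of offsets   -> min over the incoming immediate offsets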
static unsigned GetCBOffset(Value *V, OffsetForValueMap &visited) { auto it = visited.find(V); if (it != visited.end()) return it->second; visited[V] = 0; unsigned result = 0; if (ConstantInt *Imm = dyn_cast<ConstantInt>(V)) { result = Imm->getLimitedValue(); } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) { switch (BO->getOpcode()) { case Instruction::Add: { unsigned left = GetCBOffset(BO->getOperand(0), visited); unsigned right = GetCBOffset(BO->getOperand(1), visited); result = left + right; } break; case Instruction::Or: { unsigned left = GetCBOffset(BO->getOperand(0), visited); unsigned right = GetCBOffset(BO->getOperand(1), visited); result = left | right; } break; default: break; } } else if (SelectInst *SI = dyn_cast<SelectInst>(V)) { result = std::min(GetCBOffset(SI->getOperand(1), visited), GetCBOffset(SI->getOperand(2), visited)); } else if (PHINode *PN = dyn_cast<PHINode>(V)) { result = UINT_MAX; for (unsigned i = 0, ops = PN->getNumIncomingValues(); i < ops; ++i) { result = std::min(result, GetCBOffset(PN->getIncomingValue(i), visited)); } } visited[V] = result; return result; } typedef std::map<unsigned, DxilFieldAnnotation *> FieldAnnotationByOffsetMap; // Returns size in bits of the field if it's a basic type, otherwise 0. static unsigned MarkCBUse(unsigned offset, FieldAnnotationByOffsetMap &fieldMap) { auto it = fieldMap.upper_bound(offset); it--; if (it != fieldMap.end()) { it->second->SetCBVarUsed(true); return it->second->GetCompType().GetSizeInBits(); } return 0; } // Detect patterns of lshr v,16 or trunc to 16-bits and return low and high // word usage. static const unsigned kLowWordUsed = 1; static const unsigned kHighWordUsed = 2; static const unsigned kLowHighWordMask = kLowWordUsed | kHighWordUsed; static unsigned DetectLowAndHighWordUsage(ExtractValueInst *EV) { unsigned result = 0; if (EV->getType()->getScalarSizeInBits() == 32) { for (auto U : EV->users()) { Instruction *I = cast<Instruction>(U); if (I->getOpcode() == Instruction::LShr) { ConstantInt *CShift = dyn_cast<ConstantInt>(I->getOperand(1)); if (CShift && CShift->getLimitedValue() == 16) result |= kHighWordUsed; } else if (I->getOpcode() == Instruction::Trunc && I->getType()->getPrimitiveSizeInBits() == 16) { result |= kLowWordUsed; } else { // Assume whole dword is used, return 0 return 0; } if ((result & kLowHighWordMask) == kLowHighWordMask) break; } } return result; } static unsigned GetOffsetForCBExtractValue(ExtractValueInst *EV, bool bMinPrecision, unsigned &lowHighWordUsage) { DXASSERT(EV->getNumIndices() == 1, "otherwise, unexpected indices/type for extractvalue"); unsigned typeSize = 4; unsigned bits = EV->getType()->getScalarSizeInBits(); if (bits == 64) typeSize = 8; else if (bits == 16 && !bMinPrecision) typeSize = 2; lowHighWordUsage = DetectLowAndHighWordUsage(EV); return (EV->getIndices().front() * typeSize); } // Marks up to two CB uses for the case where only 16-bit type(s) // are being used from lower or upper word of a tbuffer load, // which is always 4 x 32 instead of 8 x 16, like cbuffer. static void MarkCBUsesForExtractElement(unsigned offset, FieldAnnotationByOffsetMap &fieldMap, ExtractValueInst *EV, bool bMinPrecision) { unsigned lowHighWordUsage = 0; unsigned evOffset = GetOffsetForCBExtractValue(EV, bMinPrecision, lowHighWordUsage); // For tbuffer, where value extracted is always 32-bits: // If lowHighWordUsage is 0, it means 32-bits used. // If field marked is < 32 bits, we still need to mark the high 16-bits as // used, in case there is another 16-bit field. 
// Since MarkCBUse could return 0 on non-basic type field, look for 16 // when determining whether we still need to mark high word as used. bool highUnmarked = EV->getType()->getScalarSizeInBits() == 32; if (!lowHighWordUsage || 0 != (lowHighWordUsage & kLowWordUsed)) highUnmarked &= MarkCBUse(offset + evOffset, fieldMap) == 16; if (highUnmarked && (!lowHighWordUsage || 0 != (lowHighWordUsage & kHighWordUsed))) MarkCBUse(offset + evOffset + 2, fieldMap); } static void CollectInPhiChain(PHINode *cbUser, unsigned offset, std::unordered_set<Value *> &userSet, FieldAnnotationByOffsetMap &fieldMap, bool bMinPrecision) { if (userSet.count(cbUser) > 0) return; userSet.insert(cbUser); for (User *cbU : cbUser->users()) { if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(cbU)) { MarkCBUsesForExtractElement(offset, fieldMap, EV, bMinPrecision); } else { PHINode *phi = cast<PHINode>(cbU); CollectInPhiChain(phi, offset, userSet, fieldMap, bMinPrecision); } } } static void CollectCBufferMemberUsage(Value *V, FieldAnnotationByOffsetMap &legacyFieldMap, FieldAnnotationByOffsetMap &newFieldMap, hlsl::OP *hlslOP, bool bMinPrecision, OffsetForValueMap &visited) { for (auto U : V->users()) { if (Constant *C = dyn_cast<Constant>(U)) { CollectCBufferMemberUsage(C, legacyFieldMap, newFieldMap, hlslOP, bMinPrecision, visited); } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { CollectCBufferMemberUsage(U, legacyFieldMap, newFieldMap, hlslOP, bMinPrecision, visited); } else if (CallInst *CI = dyn_cast<CallInst>(U)) { if (hlslOP->IsDxilOpFuncCallInst(CI)) { hlsl::OP::OpCode op = hlslOP->GetDxilOpFuncCallInst(CI); if (op == DXIL::OpCode::CreateHandleForLib) { CollectCBufferMemberUsage(U, legacyFieldMap, newFieldMap, hlslOP, bMinPrecision, visited); } else if (op == DXIL::OpCode::AnnotateHandle) { CollectCBufferMemberUsage(U, legacyFieldMap, newFieldMap, hlslOP, bMinPrecision, visited); } else if (op == DXIL::OpCode::CBufferLoadLegacy || op == DXIL::OpCode::BufferLoad) { Value *resIndex = (op == DXIL::OpCode::CBufferLoadLegacy) ? 
DxilInst_CBufferLoadLegacy(CI).get_regIndex() : DxilInst_BufferLoad(CI).get_index(); unsigned offset = GetCBOffset(resIndex, visited) << 4; for (User *cbU : U->users()) { if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(cbU)) { MarkCBUsesForExtractElement(offset, legacyFieldMap, EV, bMinPrecision); } else { PHINode *phi = cast<PHINode>(cbU); std::unordered_set<Value *> userSet; CollectInPhiChain(phi, offset, userSet, legacyFieldMap, bMinPrecision); } } } else if (op == DXIL::OpCode::CBufferLoad) { DxilInst_CBufferLoad cbload(CI); Value *byteOffset = cbload.get_byteOffset(); unsigned offset = GetCBOffset(byteOffset, visited); MarkCBUse(offset, newFieldMap); } } } } } void DxilLowerCreateHandleForLib::UpdateCBufferUsage() { DxilTypeSystem &TypeSys = m_DM->GetTypeSystem(); hlsl::OP *hlslOP = m_DM->GetOP(); const DataLayout &DL = m_DM->GetModule()->getDataLayout(); const auto &CBuffers = m_DM->GetCBuffers(); OffsetForValueMap visited; SmallVector<std::pair<GlobalVariable *, Type *>, 4> CBufferVars; // Collect cbuffers for (auto it = CBuffers.begin(); it != CBuffers.end(); it++) { DxilCBuffer *CB = it->get(); GlobalVariable *GV = dyn_cast<GlobalVariable>(CB->GetGlobalSymbol()); if (GV == nullptr) continue; CBufferVars.emplace_back(GV, CB->GetHLSLType()); } // Collect tbuffers for (auto &it : m_DM->GetSRVs()) { if (it->GetKind() != DXIL::ResourceKind::TBuffer) continue; GlobalVariable *GV = dyn_cast<GlobalVariable>(it->GetGlobalSymbol()); if (GV == nullptr) continue; CBufferVars.emplace_back(GV, it->GetHLSLType()); } for (auto GV_Ty : CBufferVars) { auto GV = GV_Ty.first; Type *ElemTy = GV_Ty.second->getPointerElementType(); ElemTy = dxilutil::StripArrayTypes(ElemTy, nullptr); StructType *ST = cast<StructType>(ElemTy); DxilStructAnnotation *SA = TypeSys.GetStructAnnotation(ST); if (SA == nullptr) continue; // If elements < 2, it's used if it exists. // Only old-style cbuffer { ... } will have more than one member, and // old-style cbuffers are the only ones that report usage per member. if (ST->getStructNumElements() < 2) { continue; } // Create offset maps for legacy layout and new compact layout, while // resetting usage flags const StructLayout *SL = DL.getStructLayout(ST); FieldAnnotationByOffsetMap legacyFieldMap, newFieldMap; for (unsigned i = 0; i < SA->GetNumFields(); ++i) { DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); FA.SetCBVarUsed(false); legacyFieldMap[FA.GetCBufferOffset()] = &FA; newFieldMap[(unsigned)SL->getElementOffset(i)] = &FA; } CollectCBufferMemberUsage(GV, legacyFieldMap, newFieldMap, hlslOP, m_DM->GetUseMinPrecision(), visited); } } void DxilLowerCreateHandleForLib::SetNonUniformIndexForDynamicResource( DxilModule &DM) { hlsl::OP *hlslOP = DM.GetOP(); Value *TrueVal = hlslOP->GetI1Const(true); for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::CreateHandleFromHeap)) { Function *F = it.second; if (!F) continue; for (User *U : F->users()) { CallInst *CI = cast<CallInst>(U); if (!DxilMDHelper::IsMarkedNonUniform(CI)) continue; // Set NonUniform to be true. CI->setOperand(DxilInst_CreateHandleFromHeap::arg_nonUniformIndex, TrueVal); // Clear nonUniform metadata. 
CI->setMetadata(DxilMDHelper::kDxilNonUniformAttributeMDName, nullptr); } } } // Remove createHandleFromHandle when not a lib void DxilLowerCreateHandleForLib::RemoveCreateHandleFromHandle(DxilModule &DM) { hlsl::OP *hlslOP = DM.GetOP(); Type *HdlTy = hlslOP->GetHandleType(); for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::CreateHandleForLib)) { Function *F = it.second; if (!F) continue; if (it.first != HdlTy) continue; for (auto it = F->users().begin(); it != F->users().end();) { User *U = *(it++); CallInst *CI = cast<CallInst>(U); DxilInst_CreateHandleForLib Hdl(CI); Value *Res = Hdl.get_Resource(); CI->replaceAllUsesWith(Res); CI->eraseFromParent(); } break; } } char DxilLowerCreateHandleForLib::ID = 0; ModulePass *llvm::createDxilLowerCreateHandleForLibPass() { return new DxilLowerCreateHandleForLib(); } INITIALIZE_PASS_BEGIN(DxilLowerCreateHandleForLib, "hlsl-dxil-lower-handle-for-lib", "DXIL Lower createHandleForLib", false, false) INITIALIZE_PASS_DEPENDENCY(DxilValueCache) INITIALIZE_PASS_END(DxilLowerCreateHandleForLib, "hlsl-dxil-lower-handle-for-lib", "DXIL Lower createHandleForLib", false, false) class DxilAllocateResourcesForLib : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilAllocateResourcesForLib() : ModulePass(ID), m_AutoBindingSpace(UINT_MAX) {} void applyOptions(PassOptions O) override { GetPassOptionUInt32(O, "auto-binding-space", &m_AutoBindingSpace, UINT_MAX); } StringRef getPassName() const override { return "DXIL Allocate Resources For Library"; } bool runOnModule(Module &M) override { DxilModule &DM = M.GetOrCreateDxilModule(); // Must specify a default space, and must apply to library. // Use DxilCondenseResources instead for shaders. if ((m_AutoBindingSpace == UINT_MAX) || !DM.GetShaderModel()->IsLib()) return false; bool hasResource = DM.GetCBuffers().size() || DM.GetUAVs().size() || DM.GetSRVs().size() || DM.GetSamplers().size(); if (hasResource) { DM.SetAutoBindingSpace(m_AutoBindingSpace); DxilResourceRegisterAllocator ResourceRegisterAllocator; ResourceRegisterAllocator.AllocateRegisters(DM); } return true; } private: uint32_t m_AutoBindingSpace; }; char DxilAllocateResourcesForLib::ID = 0; ModulePass *llvm::createDxilAllocateResourcesForLibPass() { return new DxilAllocateResourcesForLib(); } INITIALIZE_PASS(DxilAllocateResourcesForLib, "hlsl-dxil-allocate-resources-for-lib", "DXIL Allocate Resources For Library", false, false) namespace { struct CreateHandleFromHeapArgs { Value *Index; bool isSampler; bool isNonUniform; // All incoming handle args are confirmed. // If not resolved, some of the incoming handle is not from // createHandleFromHeap. Might be resolved after linking for lib. bool isResolved; void merge(CreateHandleFromHeapArgs &args, ResourceUseErrors &Errors, Value *mergeHdl) { if (args.isSampler != isSampler) { // Report error. Errors.ReportError(ResourceUseErrors::ErrorCode::MismatchIsSampler, mergeHdl); } args.isNonUniform |= isNonUniform; } }; } // namespace // Helper class for legalizing dynamic resource use // Convert select/phi on resources to select/phi on index. // TODO: support case when save dynamic resource as local array element. // TODO: share code with LegalizeResourceUseHelper. 
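// Illustrative sketch only (hypothetical HLSL, not from this source):
//   Texture2D t = ResourceDescriptorHeap[cond ? i : j];
// may reach here as a phi/select of two createHandleFromHeap results; the
// helper rewrites it to a phi/select on the i32 index feeding a single
// createHandleFromHeap:
//   %idx = select i1 %cond, i32 %i, i32 %j
//   %h   = call @dx.op.createHandleFromHeap(..., %idx, ...)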
class LegalizeDynamicResourceUseHelper { public: ResourceUseErrors m_Errors; DenseMap<Value *, CreateHandleFromHeapArgs> HandleToArgs; // Value sets we can use to iterate ValueSetVector HandleSelects; ResourceUseErrors Errors; std::unordered_set<Instruction *> CleanupInsts; void mergeHeapArgs(Value *SelHdl, Value *SelIdx, User::op_range Hdls) { CreateHandleFromHeapArgs args = {nullptr, false, false, true}; for (Value *V : Hdls) { auto it = HandleToArgs.find(V); // keep invalid when V is not createHandleFromHeap. if (it == HandleToArgs.end()) { args.isResolved = false; continue; } CreateHandleFromHeapArgs &itArgs = it->second; if (!itArgs.isResolved) { args.isResolved = false; continue; } if (args.Index != nullptr) { args.merge(itArgs, Errors, SelHdl); } else { args.Index = SelIdx; args.isNonUniform = itArgs.isNonUniform; args.isSampler = itArgs.isSampler; } } // set Index when all incoming Hdls cannot be resolved. if (args.Index == nullptr) args.Index = SelIdx; HandleToArgs[SelHdl] = args; } void CreateSelectsForHandleSelects() { if (HandleSelects.empty()) return; LLVMContext &Ctx = HandleSelects[0]->getContext(); Type *i32Ty = Type::getInt32Ty(Ctx); Value *UndefValue = UndefValue::get(i32Ty); // Create select for each HandleSelects. for (auto &Select : HandleSelects) { if (PHINode *Phi = dyn_cast<PHINode>(Select)) { IRBuilder<> B(Phi); unsigned numIncoming = Phi->getNumIncomingValues(); PHINode *newPhi = B.CreatePHI(i32Ty, numIncoming); for (unsigned j = 0; j < numIncoming; j++) { // Set incoming values to undef until next pass newPhi->addIncoming(UndefValue, Phi->getIncomingBlock(j)); } mergeHeapArgs(Phi, newPhi, Phi->incoming_values()); } else if (SelectInst *Sel = dyn_cast<SelectInst>(Select)) { IRBuilder<> B(Sel); Value *newSel = B.CreateSelect(Sel->getCondition(), UndefValue, UndefValue); User::op_range range = User::op_range(Sel->getOperandList() + 1, Sel->getOperandList() + 3); mergeHeapArgs(Sel, newSel, range); } else { DXASSERT(false, "otherwise, non-select/phi in Selects set"); } } } // propagate CreateHandleFromHeapArgs for HandleSel which all operands are // other HandleSel. void PropagateHeapArgs() { SmallVector<Value *, 4> Candidates; for (auto &Select : HandleSelects) { CreateHandleFromHeapArgs &args = HandleToArgs[Select]; if (args.isResolved) continue; Candidates.emplace_back(Select); } while (1) { SmallVector<Value *, 4> NextPass; for (auto &Select : Candidates) { CreateHandleFromHeapArgs &args = HandleToArgs[Select]; if (PHINode *Phi = dyn_cast<PHINode>(Select)) { mergeHeapArgs(Phi, args.Index, Phi->incoming_values()); } else if (SelectInst *Sel = dyn_cast<SelectInst>(Select)) { User::op_range range = User::op_range(Sel->getOperandList() + 1, Sel->getOperandList() + 3); mergeHeapArgs(Sel, args.Index, range); } else { DXASSERT(false, "otherwise, non-select/phi in Selects set"); } if (args.isResolved) continue; NextPass.emplace_back(Select); } // Some node cannot be reached. if (NextPass.size() == Candidates.size()) return; Candidates = NextPass; } } void UpdateSelectsForHandleSelect(hlsl::OP *hlslOP) { if (HandleSelects.empty()) return; LLVMContext &Ctx = HandleSelects[0]->getContext(); Type *pVoidTy = Type::getVoidTy(Ctx); // NOTE: phi of createHandleFromHeap and createHandleFromBinding // is not supported. 
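    // Schematic of the rewrite performed below (illustration, not quoted from
    // this source):
    //   %h = phi %dx.types.Handle [ %hA, %bbA ], [ %hB, %bbB ]
    // becomes
    //   %idx = phi i32 [ %idxA, %bbA ], [ %idxB, %bbB ]
    //   %h   = call @dx.op.createHandleFromHeap(..., %idx, isSampler,
    //                                           isNonUniform)
    // with isSampler/isNonUniform merged from the incoming handles' args.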
Function *createHdlFromHeap = hlslOP->GetOpFunc(DXIL::OpCode::CreateHandleFromHeap, pVoidTy); Value *hdlFromHeapOP = hlslOP->GetI32Const( static_cast<unsigned>(DXIL::OpCode::CreateHandleFromHeap)); for (auto &Select : HandleSelects) { if (PHINode *Phi = dyn_cast<PHINode>(Select)) { unsigned numIncoming = Phi->getNumIncomingValues(); CreateHandleFromHeapArgs &args = HandleToArgs[Phi]; PHINode *newPhi = cast<PHINode>(args.Index); if (args.isResolved) { for (unsigned j = 0; j < numIncoming; j++) { Value *V = Phi->getIncomingValue(j); auto it = HandleToArgs.find(V); DXASSERT(it != HandleToArgs.end(), "args.isResolved should be false"); CreateHandleFromHeapArgs &itArgs = it->second; newPhi->setIncomingValue(j, itArgs.Index); } IRBuilder<> B(Phi->getParent()->getFirstNonPHI()); B.SetCurrentDebugLocation(Phi->getDebugLoc()); Value *isSampler = hlslOP->GetI1Const(args.isSampler); // TODO: or args.IsNonUniform with !isUniform(Phi) with uniform // analysis. Value *isNonUniform = hlslOP->GetI1Const(args.isNonUniform); CallInst *newCI = B.CreateCall(createHdlFromHeap, {hdlFromHeapOP, newPhi, isSampler, isNonUniform}); Phi->replaceAllUsesWith(newCI); CleanupInsts.insert(Phi); // put newCI in HandleToArgs. HandleToArgs[newCI] = args; } else { newPhi->eraseFromParent(); } } else if (SelectInst *Sel = dyn_cast<SelectInst>(Select)) { CreateHandleFromHeapArgs &args = HandleToArgs[Sel]; SelectInst *newSel = cast<SelectInst>(args.Index); if (args.isResolved) { for (unsigned j = 1; j < 3; ++j) { Value *V = Sel->getOperand(j); auto it = HandleToArgs.find(V); DXASSERT(it != HandleToArgs.end(), "args.isResolved should be false"); CreateHandleFromHeapArgs &itArgs = it->second; newSel->setOperand(j, itArgs.Index); } IRBuilder<> B(newSel->getNextNode()); B.SetCurrentDebugLocation(newSel->getDebugLoc()); Value *isSampler = hlslOP->GetI1Const(args.isSampler); // TODO: or args.IsNonUniform with !isUniform(Phi). Value *isNonUniform = hlslOP->GetI1Const(args.isNonUniform); CallInst *newCI = B.CreateCall(createHdlFromHeap, {hdlFromHeapOP, newSel, isSampler, isNonUniform}); Sel->replaceAllUsesWith(newCI); CleanupInsts.insert(Sel); // put newCI in HandleToArgs. HandleToArgs[newCI] = args; } else { newSel->eraseFromParent(); } } else { DXASSERT(false, "otherwise, non-select/phi in HandleSelects set"); } } } void CollectResources(DxilModule &DM) { ValueSetVector tmpHandleSelects; hlsl::OP *hlslOP = DM.GetOP(); if (hlslOP->IsDxilOpUsed(DXIL::OpCode::CreateHandleFromHeap)) { Function *F = hlslOP->GetOpFunc(DXIL::OpCode::CreateHandleFromHeap, Type::getVoidTy(DM.GetCtx())); for (User *U : F->users()) { DxilInst_CreateHandleFromHeap Hdl(cast<CallInst>(U)); HandleToArgs[U] = {Hdl.get_index(), Hdl.get_samplerHeap_val(), Hdl.get_nonUniformIndex_val(), true}; for (User *HdlU : U->users()) { if (isa<PHINode>(HdlU) || isa<SelectInst>(HdlU)) { tmpHandleSelects.insert(HdlU); } } } } // Collect phi/sel of other phi/sel selected handles. 
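// For example (hypothetical values): a first sweep finds %s1 = select of two
// createHandleFromHeap handles; a later %p = phi(%s1, %h3) is only discovered
// in this worklist loop, because it selects over an already-collected select
// rather than over the heap calls directly.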
while (!tmpHandleSelects.empty()) { HandleSelects.insert(tmpHandleSelects.begin(), tmpHandleSelects.end()); ValueSetVector newHandleSelects; for (Value *Hdl : tmpHandleSelects) { for (User *HdlU : Hdl->users()) { if (HandleSelects.count(HdlU)) continue; if (isa<PHINode>(HdlU) || isa<SelectInst>(HdlU)) { newHandleSelects.insert(HdlU); } } } tmpHandleSelects = newHandleSelects; } } void DoTransform(hlsl::OP *hlslOP) { CreateSelectsForHandleSelects(); PropagateHeapArgs(); UpdateSelectsForHandleSelect(hlslOP); CleanupUnusedValues(CleanupInsts); } bool runOnModule(llvm::Module &M) { DxilModule &DM = M.GetOrCreateDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); CollectResources(DM); // If no selects or allocas are involved, there isn't anything to do if (HandleSelects.empty()) return false; DoTransform(hlslOP); return true; } }; namespace { // Make sure no phi/sel on annotateHandle. bool sinkAnnotateHandleAfterSelect(DxilModule &DM, Module &M) { // Collect AnnotateHandle calls. SmallVector<CallInst *, 4> annotHdls; hlsl::OP *op = DM.GetOP(); LLVMContext &Ctx = M.getContext(); Type *pVoidTy = Type::getVoidTy(Ctx); Function *annotHdlFn = op->GetOpFunc(DXIL::OpCode::AnnotateHandle, pVoidTy); for (auto it : op->GetOpFuncList(OP::OpCode::AnnotateHandle)) { Function *F = it.second; if (F == nullptr) continue; for (auto U = F->user_begin(); U != F->user_end();) { CallInst *CI = dyn_cast<CallInst>(*(U++)); annotHdls.emplace_back(CI); } } if (annotHdls.empty()) return false; SetVector<Instruction *> selectAnnotHdls; for (CallInst *CI : annotHdls) { for (User *U : CI->users()) { if (isa<PHINode>(U) || isa<SelectInst>(U)) selectAnnotHdls.insert(cast<Instruction>(U)); } } const ShaderModel *pSM = DM.GetShaderModel(); Type *propsTy = op->GetResourcePropertiesType(); Value *OpArg = op->GetI32Const(static_cast<unsigned>(DXIL::OpCode::AnnotateHandle)); ResourceUseErrors Errors; Value *undefHdl = UndefValue::get(op->GetHandleType()); // Sink annotateHandle after phi. for (Instruction *Hdl : selectAnnotHdls) { if (PHINode *phi = dyn_cast<PHINode>(Hdl)) { Value *props = nullptr; for (unsigned i = 0; i < phi->getNumIncomingValues(); ++i) { Value *V = phi->getIncomingValue(i); if (CallInst *CI = dyn_cast<CallInst>(V)) { DxilInst_AnnotateHandle annot(CI); if (annot) { if (props == nullptr) { props = annot.get_props(); } else if (props != annot.get_props()) { props = resource_helper::tryMergeProps( cast<Constant>(props), cast<Constant>(annot.get_props()), propsTy, *pSM); if (props == nullptr) { Errors.ReportError( ResourceUseErrors::ErrorCode::MismatchHandleAnnotation, phi); props = annot.get_props(); } } Value *res = annot.get_res(); phi->setIncomingValue(i, res); } } } // Insert after phi. 
IRBuilder<> B(phi->getParent()->getFirstNonPHI()); CallInst *annotCI = B.CreateCall(annotHdlFn, {OpArg, undefHdl, props}); phi->replaceAllUsesWith(annotCI); annotCI->setArgOperand(DxilInst_AnnotateHandle::arg_res, phi); } else { SelectInst *sel = dyn_cast<SelectInst>(Hdl); Value *TVal = sel->getTrueValue(); Value *FVal = sel->getFalseValue(); Value *props = nullptr; if (CallInst *CI = dyn_cast<CallInst>(TVal)) { DxilInst_AnnotateHandle annot(CI); if (annot) { props = annot.get_props(); Value *res = annot.get_res(); sel->setOperand(1, res); } } if (CallInst *CI = dyn_cast<CallInst>(FVal)) { DxilInst_AnnotateHandle annot(CI); if (annot) { if (props == nullptr) { props = annot.get_props(); } else if (props != annot.get_props()) { props = resource_helper::tryMergeProps( cast<Constant>(props), cast<Constant>(annot.get_props()), propsTy, *pSM); if (props == nullptr) { Errors.ReportError( ResourceUseErrors::ErrorCode::MismatchHandleAnnotation, sel); props = annot.get_props(); } } Value *res = annot.get_res(); sel->setOperand(2, res); } } // Insert after sel. IRBuilder<> B(sel->getNextNode()); CallInst *annotCI = B.CreateCall(annotHdlFn, {OpArg, undefHdl, props}); sel->replaceAllUsesWith(annotCI); annotCI->setArgOperand(DxilInst_AnnotateHandle::arg_res, sel); } } return true; } } // namespace // Remove redundant annotateHandle. // Legalize phi on createHandleFromHeap. class DxilCleanupDynamicResourceHandle : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilCleanupDynamicResourceHandle() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Cleanup dynamic resource handle calls"; } bool runOnModule(Module &M) override { DxilModule &DM = M.GetOrCreateDxilModule(); // Nothing to do if Dxil ver < 1.6 unsigned dxilMajor, dxilMinor; DM.GetShaderModel()->GetDxilVersion(dxilMajor, dxilMinor); if (DXIL::CompareVersions(dxilMajor, dxilMinor, 1, 6) < 0) return false; bool bChanged = sinkAnnotateHandleAfterSelect(DM, M); // Legalize phi on createHandleFromHeap. LegalizeDynamicResourceUseHelper helper; bChanged |= helper.runOnModule(M); hlsl::OP *op = DM.GetOP(); const ShaderModel *pSM = DM.GetShaderModel(); Type *propsTy = op->GetResourcePropertiesType(); // Iterate AnnotateHandle calls and eliminate redundant annotate handle call // chains. for (auto it : op->GetOpFuncList(OP::OpCode::AnnotateHandle)) { Function *F = it.second; if (F == nullptr) continue; for (auto U = F->user_begin(); U != F->user_end();) { CallInst *CI = dyn_cast<CallInst>(*(U++)); if (CI) { DxilInst_AnnotateHandle AH(CI); if (AH) { Value *Res = AH.get_res(); // Skip handle from load global res.
if (isa<LoadInst>(Res)) continue; CallInst *CRes = dyn_cast<CallInst>(Res); if (!CRes) continue; DxilInst_AnnotateHandle PrevAH(CRes); if (PrevAH) { Value *mergedProps = resource_helper::tryMergeProps( cast<Constant>(AH.get_props()), cast<Constant>(PrevAH.get_props()), propsTy, *pSM); if (mergedProps == nullptr) { ResourceUseErrors Errors; Errors.ReportError( ResourceUseErrors::ErrorCode::MismatchHandleAnnotation, CI); } else if (mergedProps != PrevAH.get_props()) { PrevAH.set_props(mergedProps); } CI->replaceAllUsesWith(Res); CI->eraseFromParent(); bChanged = true; } } } } } return bChanged; } private: }; char DxilCleanupDynamicResourceHandle::ID = 0; ModulePass *llvm::createDxilCleanupDynamicResourceHandlePass() { return new DxilCleanupDynamicResourceHandle(); } INITIALIZE_PASS(DxilCleanupDynamicResourceHandle, "hlsl-dxil-cleanup-dynamic-resource-handle", "DXIL Cleanup dynamic resource handle calls", false, false)
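// Illustrative sketch (pseudo-DXIL, not from this file): the redundant-chain
// elimination above merges properties into the inner annotation, e.g.
//   %a = annotateHandle(%h, propsA)
//   %b = annotateHandle(%a, propsB)
// becomes
//   %a = annotateHandle(%h, tryMergeProps(propsB, propsA))
// and all uses of %b are rewritten to %a.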
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMatrixType.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLMatrixType.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLMatrixType.h" #include "dxc/Support/Global.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" using namespace llvm; using namespace hlsl; HLMatrixType::HLMatrixType(Type *RegReprElemTy, unsigned NumRows, unsigned NumColumns) : RegReprElemTy(RegReprElemTy), NumRows(NumRows), NumColumns(NumColumns) { DXASSERT(RegReprElemTy != nullptr && (RegReprElemTy->isIntegerTy() || RegReprElemTy->isFloatingPointTy()), "Invalid matrix element type."); DXASSERT(NumRows >= 1 && NumRows <= 4 && NumColumns >= 1 && NumColumns <= 4, "Invalid matrix dimensions."); } Type *HLMatrixType::getElementType(bool MemRepr) const { // Bool i1s become i32s return MemRepr && RegReprElemTy->isIntegerTy(1) ? IntegerType::get(RegReprElemTy->getContext(), 32) : RegReprElemTy; } unsigned HLMatrixType::getRowMajorIndex(unsigned RowIdx, unsigned ColIdx) const { return getRowMajorIndex(RowIdx, ColIdx, NumRows, NumColumns); } unsigned HLMatrixType::getColumnMajorIndex(unsigned RowIdx, unsigned ColIdx) const { return getColumnMajorIndex(RowIdx, ColIdx, NumRows, NumColumns); } unsigned HLMatrixType::getRowMajorIndex(unsigned RowIdx, unsigned ColIdx, unsigned NumRows, unsigned NumColumns) { DXASSERT_NOMSG(RowIdx < NumRows && ColIdx < NumColumns); return RowIdx * NumColumns + ColIdx; } unsigned HLMatrixType::getColumnMajorIndex(unsigned RowIdx, unsigned ColIdx, unsigned NumRows, unsigned NumColumns) { DXASSERT_NOMSG(RowIdx < NumRows && ColIdx < NumColumns); return ColIdx * NumRows + RowIdx; } VectorType *HLMatrixType::getLoweredVectorType(bool MemRepr) const { return VectorType::get(getElementType(MemRepr), getNumElements()); } Value *HLMatrixType::emitLoweredMemToReg(Value *Val, IRBuilder<> &Builder) const { DXASSERT(Val->getType()->getScalarType() == getElementTypeForMem(), "Lowered matrix type mismatch."); if (RegReprElemTy->isIntegerTy(1)) { Val = Builder.CreateICmpNE(Val, Constant::getNullValue(Val->getType()), "tobool"); } return Val; } Value *HLMatrixType::emitLoweredRegToMem(Value *Val, IRBuilder<> &Builder) const { DXASSERT(Val->getType()->getScalarType() == RegReprElemTy, "Lowered matrix type mismatch."); if (RegReprElemTy->isIntegerTy(1)) { Type *MemReprTy = Val->getType()->isVectorTy() ? 
getLoweredVectorTypeForMem() : getElementTypeForMem(); Val = Builder.CreateZExt(Val, MemReprTy, "frombool"); } return Val; } Value *HLMatrixType::emitLoweredLoad(Value *Ptr, IRBuilder<> &Builder) const { return emitLoweredMemToReg(Builder.CreateLoad(Ptr), Builder); } StoreInst *HLMatrixType::emitLoweredStore(Value *Val, Value *Ptr, IRBuilder<> &Builder) const { return Builder.CreateStore(emitLoweredRegToMem(Val, Builder), Ptr); } Value *HLMatrixType::emitLoweredVectorRowToCol(Value *VecVal, IRBuilder<> &Builder) const { DXASSERT(VecVal->getType() == getLoweredVectorTypeForReg(), "Lowered matrix type mismatch."); if (NumRows == 1 || NumColumns == 1) return VecVal; SmallVector<int, 16> ShuffleIndices; for (unsigned ColIdx = 0; ColIdx < NumColumns; ++ColIdx) for (unsigned RowIdx = 0; RowIdx < NumRows; ++RowIdx) ShuffleIndices.emplace_back((int)getRowMajorIndex(RowIdx, ColIdx)); return Builder.CreateShuffleVector(VecVal, VecVal, ShuffleIndices, "row2col"); } Value *HLMatrixType::emitLoweredVectorColToRow(Value *VecVal, IRBuilder<> &Builder) const { DXASSERT(VecVal->getType() == getLoweredVectorTypeForReg(), "Lowered matrix type mismatch."); if (NumRows == 1 || NumColumns == 1) return VecVal; SmallVector<int, 16> ShuffleIndices; for (unsigned RowIdx = 0; RowIdx < NumRows; ++RowIdx) for (unsigned ColIdx = 0; ColIdx < NumColumns; ++ColIdx) ShuffleIndices.emplace_back((int)getColumnMajorIndex(RowIdx, ColIdx)); return Builder.CreateShuffleVector(VecVal, VecVal, ShuffleIndices, "col2row"); } bool HLMatrixType::isa(Type *Ty) { StructType *StructTy = llvm::dyn_cast<StructType>(Ty); return StructTy != nullptr && !StructTy->isLiteral() && StructTy->getName().startswith(StructNamePrefix); } bool HLMatrixType::isMatrixPtr(Type *Ty) { PointerType *PtrTy = llvm::dyn_cast<PointerType>(Ty); return PtrTy != nullptr && isa(PtrTy->getElementType()); } bool HLMatrixType::isMatrixArray(Type *Ty) { ArrayType *ArrayTy = llvm::dyn_cast<ArrayType>(Ty); if (ArrayTy == nullptr) return false; while (ArrayType *NestedArrayTy = llvm::dyn_cast<ArrayType>(ArrayTy->getElementType())) ArrayTy = NestedArrayTy; return isa(ArrayTy->getElementType()); } bool HLMatrixType::isMatrixArrayPtr(Type *Ty) { PointerType *PtrTy = llvm::dyn_cast<PointerType>(Ty); if (PtrTy == nullptr) return false; return isMatrixArray(PtrTy->getElementType()); } bool HLMatrixType::isMatrixPtrOrArrayPtr(Type *Ty) { PointerType *PtrTy = llvm::dyn_cast<PointerType>(Ty); if (PtrTy == nullptr) return false; Ty = PtrTy->getElementType(); while (ArrayType *ArrayTy = llvm::dyn_cast<ArrayType>(Ty)) Ty = Ty->getArrayElementType(); return isa(Ty); } bool HLMatrixType::isMatrixOrPtrOrArrayPtr(Type *Ty) { if (PointerType *PtrTy = llvm::dyn_cast<PointerType>(Ty)) Ty = PtrTy->getElementType(); while (ArrayType *ArrayTy = llvm::dyn_cast<ArrayType>(Ty)) Ty = ArrayTy->getElementType(); return isa(Ty); } // Converts a matrix, matrix pointer, or matrix array pointer type to its // lowered equivalent. If the type is not matrix-derived, the original type is // returned. Does not lower struct types containing matrices. Type *HLMatrixType::getLoweredType(Type *Ty, bool MemRepr) { if (PointerType *PtrTy = llvm::dyn_cast<PointerType>(Ty)) { // Pointees are always in memory representation Type *LoweredElemTy = getLoweredType(PtrTy->getElementType(), /* MemRepr */ true); return LoweredElemTy == PtrTy->getElementType() ? 
Ty : PointerType::get(LoweredElemTy, PtrTy->getAddressSpace()); } else if (ArrayType *ArrayTy = llvm::dyn_cast<ArrayType>(Ty)) { // Arrays are always in memory and so their elements are in memory // representation Type *LoweredElemTy = getLoweredType(ArrayTy->getElementType(), /* MemRepr */ true); return LoweredElemTy == ArrayTy->getElementType() ? Ty : ArrayType::get(LoweredElemTy, ArrayTy->getNumElements()); } else if (HLMatrixType MatrixTy = HLMatrixType::dyn_cast(Ty)) { return MatrixTy.getLoweredVectorType(MemRepr); } else return Ty; } HLMatrixType HLMatrixType::cast(Type *Ty) { DXASSERT_NOMSG(isa(Ty)); StructType *StructTy = llvm::cast<StructType>(Ty); DXASSERT_NOMSG(Ty->getNumContainedTypes() == 1); ArrayType *RowArrayTy = llvm::cast<ArrayType>(StructTy->getElementType(0)); DXASSERT_NOMSG(RowArrayTy->getNumElements() >= 1 && RowArrayTy->getNumElements() <= 4); VectorType *RowTy = llvm::cast<VectorType>(RowArrayTy->getElementType()); DXASSERT_NOMSG(RowTy->getNumElements() >= 1 && RowTy->getNumElements() <= 4); return HLMatrixType(RowTy->getElementType(), RowArrayTy->getNumElements(), RowTy->getNumElements()); } HLMatrixType HLMatrixType::dyn_cast(Type *Ty) { return isa(Ty) ? cast(Ty) : HLMatrixType(); }
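// Worked example (added for illustration; these asserts restate the index
// math above for a hypothetical 3x2 matrix, NumRows = 3, NumColumns = 2):
// element (row 1, col 0) has row-major index 1 * NumColumns + 0 = 2 and
// column-major index 0 * NumRows + 1 = 1.
static_assert(1 * 2 + 0 == 2, "row-major index of (1,0) in a 3x2 matrix");
static_assert(0 * 3 + 1 == 1, "column-major index of (1,0) in a 3x2 matrix");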
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLLowerUDT.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLLowerUDT.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Lower user defined type used directly by certain intrinsic operations. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLLowerUDT.h" #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "HLMatrixSubscriptUseReplacer.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" using namespace llvm; using namespace hlsl; // Lowered UDT is the same layout, but with vectors and matrices translated to // arrays. // Returns nullptr for failure due to embedded HLSL object type. StructType *hlsl::GetLoweredUDT(StructType *structTy, DxilTypeSystem *pTypeSys) { bool changed = false; SmallVector<Type *, 8> NewElTys(structTy->getNumContainedTypes()); for (unsigned iField = 0; iField < NewElTys.size(); ++iField) { Type *FieldTy = structTy->getContainedType(iField); // Default to original type NewElTys[iField] = FieldTy; // Unwrap arrays: SmallVector<unsigned, 4> OuterToInnerLengths; Type *EltTy = dxilutil::StripArrayTypes(FieldTy, &OuterToInnerLengths); Type *NewTy = EltTy; // Lower element if necessary if (FixedVectorType *VT = dyn_cast<FixedVectorType>(EltTy)) { NewTy = ArrayType::get(VT->getElementType(), VT->getNumElements()); } else if (HLMatrixType Mat = HLMatrixType::dyn_cast(EltTy)) { NewTy = ArrayType::get(Mat.getElementType(/*MemRepr*/ true), Mat.getNumElements()); } else if (dxilutil::IsHLSLObjectType(EltTy) || dxilutil::IsHLSLRayQueryType(EltTy)) { // We cannot lower a structure with an embedded object type return nullptr; } else if (StructType *ST = dyn_cast<StructType>(EltTy)) { NewTy = GetLoweredUDT(ST); if (nullptr == NewTy) return nullptr; // Propagate failure back to root } else if (EltTy->isIntegerTy(1)) { // Must translate bool to mem type EltTy = IntegerType::get(EltTy->getContext(), 32); } // if unchanged, skip field if (NewTy == EltTy) continue; // Rewrap Arrays: for (auto itLen = OuterToInnerLengths.rbegin(), E = OuterToInnerLengths.rend(); itLen != E; ++itLen) { NewTy = ArrayType::get(NewTy, *itLen); } // Update field, and set changed NewElTys[iField] = NewTy; changed = true; } if (changed) { StructType *newStructTy = StructType::create( structTy->getContext(), NewElTys, structTy->getStructName()); if (DxilStructAnnotation *pSA = pTypeSys ? 
pTypeSys->GetStructAnnotation(structTy) : nullptr) { if (!pTypeSys->GetStructAnnotation(newStructTy)) { DxilStructAnnotation &NewSA = *pTypeSys->AddStructAnnotation(newStructTy); for (unsigned iField = 0; iField < NewElTys.size(); ++iField) { NewSA.GetFieldAnnotation(iField) = pSA->GetFieldAnnotation(iField); } } } return newStructTy; } return structTy; } Constant * hlsl::TranslateInitForLoweredUDT(Constant *Init, Type *NewTy, // We need orientation for matrix fields DxilTypeSystem *pTypeSys, MatrixOrientation matOrientation) { // handle undef and zero init if (isa<UndefValue>(Init)) return UndefValue::get(NewTy); else if (Init->getType()->isAggregateType() && Init->isZeroValue()) return ConstantAggregateZero::get(NewTy); // unchanged Type *Ty = Init->getType(); if (Ty == NewTy) return Init; SmallVector<Constant *, 16> values; if (Ty->isArrayTy()) { values.reserve(Ty->getArrayNumElements()); ConstantArray *CA = cast<ConstantArray>(Init); for (unsigned i = 0; i < Ty->getArrayNumElements(); ++i) values.emplace_back(TranslateInitForLoweredUDT( CA->getAggregateElement(i), NewTy->getArrayElementType(), pTypeSys, matOrientation)); return ConstantArray::get(cast<ArrayType>(NewTy), values); } else if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) { values.reserve(VT->getNumElements()); ConstantVector *CV = cast<ConstantVector>(Init); for (unsigned i = 0; i < VT->getNumElements(); ++i) values.emplace_back(CV->getAggregateElement(i)); return ConstantArray::get(cast<ArrayType>(NewTy), values); } else if (HLMatrixType Mat = HLMatrixType::dyn_cast(Ty)) { values.reserve(Mat.getNumElements()); ConstantArray *MatArray = cast<ConstantArray>(cast<ConstantStruct>(Init)->getOperand(0)); for (unsigned row = 0; row < Mat.getNumRows(); ++row) { ConstantVector *RowVector = cast<ConstantVector>(MatArray->getOperand(row)); for (unsigned col = 0; col < Mat.getNumColumns(); ++col) { unsigned index = matOrientation == MatrixOrientation::ColumnMajor ? Mat.getColumnMajorIndex(row, col) : Mat.getRowMajorIndex(row, col); values[index] = RowVector->getOperand(col); } } } else if (StructType *ST = dyn_cast<StructType>(Ty)) { DxilStructAnnotation *pStructAnnotation = pTypeSys ? 
pTypeSys->GetStructAnnotation(ST) : nullptr; values.reserve(ST->getNumContainedTypes()); ConstantStruct *CS = cast<ConstantStruct>(Init); for (unsigned i = 0; i < ST->getStructNumElements(); ++i) { MatrixOrientation matFieldOrientation = matOrientation; if (pStructAnnotation) { DxilFieldAnnotation &FA = pStructAnnotation->GetFieldAnnotation(i); if (FA.HasMatrixAnnotation()) { matFieldOrientation = FA.GetMatrixAnnotation().Orientation; } } values.emplace_back(TranslateInitForLoweredUDT( cast<Constant>(CS->getAggregateElement(i)), NewTy->getStructElementType(i), pTypeSys, matFieldOrientation)); } return ConstantStruct::get(cast<StructType>(NewTy), values); } return Init; } static void ReplaceUsesForLoweredUDTImpl(Value *V, Value *NewV) { Type *Ty = V->getType(); Type *NewTy = NewV->getType(); if (Ty == NewTy) { V->replaceAllUsesWith(NewV); if (Instruction *I = dyn_cast<Instruction>(V)) I->dropAllReferences(); if (Constant *CV = dyn_cast<Constant>(V)) CV->removeDeadConstantUsers(); return; } DXASSERT_NOMSG(Ty->isPointerTy() && NewTy->isPointerTy()); unsigned OriginalAddrSpace = Ty->getPointerAddressSpace(); unsigned NewAddrSpace = NewTy->getPointerAddressSpace(); DXASSERT((OriginalAddrSpace == NewAddrSpace) || NewAddrSpace == DXIL::kNodeRecordAddrSpace, "Only DXIL::kNodeRecordAddrSpace are allowed when address space " "mismatch"); Ty = Ty->getPointerElementType(); NewTy = NewTy->getPointerElementType(); while (!V->use_empty()) { Use &use = *V->use_begin(); User *user = use.getUser(); if (Instruction *I = dyn_cast<Instruction>(user)) { use.set(UndefValue::get(I->getType())); } if (LoadInst *LI = dyn_cast<LoadInst>(user)) { IRBuilder<> Builder(LI); Value *result = UndefValue::get(Ty); if (Ty == NewTy) { // Ptrs differ by addrspace only result = Builder.CreateLoad(NewV); } else { // Load for non-matching type should only be vector FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty); DXASSERT(VT && NewTy->isArrayTy() && VT->getNumElements() == NewTy->getArrayNumElements(), "unexpected load of non-matching type"); for (unsigned i = 0; i < VT->getNumElements(); ++i) { Value *GEP = Builder.CreateInBoundsGEP( NewV, {Builder.getInt32(0), Builder.getInt32(i)}); Value *El = Builder.CreateLoad(GEP); result = Builder.CreateInsertElement(result, El, i); } } LI->replaceAllUsesWith(result); LI->eraseFromParent(); } else if (StoreInst *SI = dyn_cast<StoreInst>(user)) { IRBuilder<> Builder(SI); if (Ty == NewTy) { // Ptrs differ by addrspace only Builder.CreateStore(SI->getValueOperand(), NewV); } else { // Store for non-matching type should only be vector FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty); DXASSERT(VT && NewTy->isArrayTy() && VT->getNumElements() == NewTy->getArrayNumElements(), "unexpected load of non-matching type"); for (unsigned i = 0; i < VT->getNumElements(); ++i) { Value *EE = Builder.CreateExtractElement(SI->getValueOperand(), i); Value *GEP = Builder.CreateInBoundsGEP( NewV, {Builder.getInt32(0), Builder.getInt32(i)}); Builder.CreateStore(EE, GEP); } } SI->eraseFromParent(); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) { // Non-constant GEP IRBuilder<> Builder(GEP); SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); Value *NewGEP = Builder.CreateGEP(NewV, idxList); ReplaceUsesForLoweredUDTImpl(GEP, NewGEP); GEP->eraseFromParent(); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(user)) { // Has to be constant GEP, NewV better be constant SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); Constant *NewGEP = 
ConstantExpr::getGetElementPtr( nullptr, cast<Constant>(NewV), idxList, true); ReplaceUsesForLoweredUDTImpl(GEP, NewGEP); } else if (AddrSpaceCastInst *AC = dyn_cast<AddrSpaceCastInst>(user)) { // Address space cast IRBuilder<> Builder(AC); Value *NewAC = Builder.CreateAddrSpaceCast( NewV, PointerType::get(Ty, AC->getType()->getPointerAddressSpace())); ReplaceUsesForLoweredUDTImpl(user, NewAC); AC->eraseFromParent(); } else if (BitCastInst *BC = dyn_cast<BitCastInst>(user)) { IRBuilder<> Builder(BC); if (BC->getType()->getPointerElementType() == NewTy) { // if already bitcast to new type, just replace the bitcast // with the new value (already translated user function) BC->replaceAllUsesWith(NewV); BC->eraseFromParent(); } else { // Could be i8 for memcpy? // Replace bitcast argument with new value use.set(NewV); } } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(user)) { // Constant AddrSpaceCast, or BitCast if (CE->getOpcode() == Instruction::AddrSpaceCast) { DXASSERT( CE->getType()->getPointerAddressSpace() != NewAddrSpace && OriginalAddrSpace == NewAddrSpace, "When replace Constant, V and NewV must have same address space"); Constant *NewAC = ConstantExpr::getAddrSpaceCast( cast<Constant>(NewV), PointerType::get(Ty, CE->getType()->getPointerAddressSpace())); ReplaceUsesForLoweredUDTImpl(user, NewAC); } else if (CE->getOpcode() == Instruction::BitCast) { if (CE->getType()->getPointerElementType() == NewTy) { // if already bitcast to new type, just replace the bitcast // with the new value CE->replaceAllUsesWith(NewV); } else { // Could be i8 for memcpy? // Replace bitcast argument with new value CE->replaceAllUsesWith( ConstantExpr::getBitCast(cast<Constant>(NewV), CE->getType())); } } else { DXASSERT(0, "unhandled constant expr for lowered UDT"); // better than infinite loop on release CE->replaceAllUsesWith(UndefValue::get(CE->getType())); } } else if (CallInst *CI = dyn_cast<CallInst>(user)) { // Lower some matrix intrinsics that access pointers early, and // cast arguments for user functions or special UDT intrinsics // for later translation. Function *F = CI->getCalledFunction(); HLOpcodeGroup group = GetHLOpcodeGroupByName(F); HLMatrixType Mat = HLMatrixType::dyn_cast(Ty); bool bColMajor = false; switch (group) { case HLOpcodeGroup::HLMatLoadStore: { DXASSERT(Mat, "otherwise, matrix operation on non-matrix value"); IRBuilder<> Builder(CI); HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLMatLoadStoreOpcode::ColMatLoad: bColMajor = true; LLVM_FALLTHROUGH; case HLMatLoadStoreOpcode::RowMatLoad: { Value *val = UndefValue::get(VectorType::get( NewTy->getArrayElementType(), NewTy->getArrayNumElements())); for (unsigned i = 0; i < NewTy->getArrayNumElements(); ++i) { Value *GEP = Builder.CreateGEP( NewV, {Builder.getInt32(0), Builder.getInt32(i)}); Value *elt = Builder.CreateLoad(GEP); val = Builder.CreateInsertElement(val, elt, i); } if (!CI->getType()->isVectorTy()) { // Before HLMatrixLower, translate vector back to HL matrix value.
if (bColMajor) { // transpose matrix to match expected value orientation for // default cast to matrix type SmallVector<int, 16> ShuffleIndices; for (unsigned RowIdx = 0; RowIdx < Mat.getNumRows(); ++RowIdx) for (unsigned ColIdx = 0; ColIdx < Mat.getNumColumns(); ++ColIdx) ShuffleIndices.emplace_back(static_cast<int>( Mat.getColumnMajorIndex(RowIdx, ColIdx))); val = Builder.CreateShuffleVector(val, val, ShuffleIndices); } // lower mem to reg type val = Mat.emitLoweredMemToReg(val, Builder); // cast vector back to matrix value (DefaultCast expects row major) unsigned newOpcode = (unsigned)HLCastOpcode::DefaultCast; val = callHLFunction(*F->getParent(), HLOpcodeGroup::HLCast, newOpcode, Ty, {Builder.getInt32(newOpcode), val}, Builder); if (bColMajor) { // emit cast row to col to match original result newOpcode = (unsigned)HLCastOpcode::RowMatrixToColMatrix; val = callHLFunction(*F->getParent(), HLOpcodeGroup::HLCast, newOpcode, Ty, {Builder.getInt32(newOpcode), val}, Builder); } } // replace use of HLMatLoadStore with loaded vector CI->replaceAllUsesWith(val); } break; case HLMatLoadStoreOpcode::ColMatStore: bColMajor = true; LLVM_FALLTHROUGH; case HLMatLoadStoreOpcode::RowMatStore: { // HLCast matrix value to vector unsigned newOpcode = (unsigned)(bColMajor ? HLCastOpcode::ColMatrixToVecCast : HLCastOpcode::RowMatrixToVecCast); Value *val = callHLFunction( *F->getParent(), HLOpcodeGroup::HLCast, newOpcode, Mat.getLoweredVectorType(false), {Builder.getInt32(newOpcode), CI->getArgOperand(HLOperandIndex::kMatStoreValOpIdx)}, Builder); // lower reg to mem type val = Mat.emitLoweredRegToMem(val, Builder); for (unsigned i = 0; i < NewTy->getArrayNumElements(); ++i) { Value *elt = Builder.CreateExtractElement(val, i); Value *GEP = Builder.CreateGEP( NewV, {Builder.getInt32(0), Builder.getInt32(i)}); Builder.CreateStore(elt, GEP); } } break; default: DXASSERT(0, "invalid opcode"); } CI->eraseFromParent(); } break; case HLOpcodeGroup::HLSubscript: { SmallVector<Value *, 4> ElemIndices; HLSubscriptOpcode opcode = static_cast<HLSubscriptOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLSubscriptOpcode::VectorSubscript: DXASSERT(0, "not handled yet"); break; case HLSubscriptOpcode::ColMatElement: bColMajor = true; LLVM_FALLTHROUGH; case HLSubscriptOpcode::RowMatElement: { ConstantDataSequential *cIdx = cast<ConstantDataSequential>( CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx)); for (unsigned i = 0; i < cIdx->getNumElements(); ++i) { ElemIndices.push_back(cIdx->getElementAsConstant(i)); } } break; case HLSubscriptOpcode::ColMatSubscript: bColMajor = true; LLVM_FALLTHROUGH; case HLSubscriptOpcode::RowMatSubscript: { for (unsigned Idx = HLOperandIndex::kMatSubscriptSubOpIdx; Idx < CI->getNumArgOperands(); ++Idx) { ElemIndices.emplace_back(CI->getArgOperand(Idx)); } } break; default: DXASSERT(0, "invalid opcode"); } std::vector<Instruction *> DeadInsts; HLMatrixSubscriptUseReplacer UseReplacer( CI, NewV, /*TempLoweredMatrix*/ nullptr, ElemIndices, /*AllowLoweredPtrGEPs*/ true, DeadInsts); DXASSERT(CI->use_empty(), "Expected all matrix subscript uses to have been replaced."); CI->eraseFromParent(); while (!DeadInsts.empty()) { DeadInsts.back()->eraseFromParent(); DeadInsts.pop_back(); } } break; // case HLOpcodeGroup::NotHL: // TODO: Support lib functions case HLOpcodeGroup::HLIntrinsic: { // Just addrspace cast/bitcast for now IRBuilder<> Builder(CI); Value *Cast = NewV; if (OriginalAddrSpace != NewAddrSpace) Cast = Builder.CreateAddrSpaceCast( Cast, PointerType::get(NewTy, 
OriginalAddrSpace)); if (V->getType() != Cast->getType()) Cast = Builder.CreateBitCast(Cast, V->getType()); use.set(Cast); continue; } break; default: DXASSERT(0, "invalid opcode"); // Replace user with undef to prevent infinite loop on unhandled case. user->replaceAllUsesWith(UndefValue::get(user->getType())); } } else { // What else? DXASSERT(false, "case not handled."); // Replace user with undef to prevent infinite loop on unhandled case. user->replaceAllUsesWith(UndefValue::get(user->getType())); } // Clean up dead constant users to prevent infinite loop if (Constant *CV = dyn_cast<Constant>(V)) CV->removeDeadConstantUsers(); } } void hlsl::ReplaceUsesForLoweredUDT(Value *V, Value *NewV) { ReplaceUsesForLoweredUDTImpl(V, NewV); // Merge GepUse later to avoid mutate type and merge gep use at same time. dxilutil::MergeGepUse(NewV); }
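// Illustrative sketch (hypothetical HLSL struct, not from this file): given
//   struct Foo { float4 v; float2x2 m; bool b; };
// GetLoweredUDT produces the memory-layout equivalent of
//   struct Foo { float v[4]; float m[4]; int b; };
// (vectors and matrices become element arrays; bool widens to its i32 memory
// representation), and ReplaceUsesForLoweredUDT then rewrites loads, stores,
// GEPs, and HL matrix intrinsics to match the new layout.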
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLModule.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLModule.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // HighLevel DX IR module. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLModule.h" #include "dxc/DXIL/DxilCBuffer.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilShaderModel.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/WinAdapter.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using std::string; using std::unique_ptr; using std::vector; namespace hlsl { // Avoid dependency on HLModule from llvm::Module using this: void HLModule_RemoveGlobal(llvm::Module *M, llvm::GlobalObject *G) { if (M && G && M->HasHLModule()) { if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(G)) M->GetHLModule().RemoveGlobal(GV); else if (llvm::Function *F = dyn_cast<llvm::Function>(G)) M->GetHLModule().RemoveFunction(F); } } void HLModule_ResetModule(llvm::Module *M) { if (M && M->HasHLModule()) delete &M->GetHLModule(); M->SetHLModule(nullptr); } //------------------------------------------------------------------------------ // // HLModule methods. // HLModule::HLModule(Module *pModule) : m_Ctx(pModule->getContext()), m_pModule(pModule), m_pEntryFunc(nullptr), m_EntryName(""), m_pMDHelper(llvm::make_unique<DxilMDHelper>( pModule, llvm::make_unique<HLExtraPropertyHelper>(pModule))), m_pDebugInfoFinder(nullptr), m_pSM(nullptr), m_DxilMajor(DXIL::kDxilMajor), m_DxilMinor(DXIL::kDxilMinor), m_ValMajor(0), m_ValMinor(0), m_Float32DenormMode(DXIL::Float32DenormMode::Any), m_pOP(llvm::make_unique<OP>(pModule->getContext(), pModule)), m_AutoBindingSpace(UINT_MAX), m_DefaultLinkage(DXIL::DefaultLinkage::Default), m_pTypeSystem(llvm::make_unique<DxilTypeSystem>(pModule)) { DXASSERT_NOMSG(m_pModule != nullptr); m_pModule->pfnRemoveGlobal = &HLModule_RemoveGlobal; m_pModule->pfnResetHLModule = &HLModule_ResetModule; // Pin LLVM dump methods. TODO: make debug-only. 
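// (Taking the member-function addresses below and folding their difference
// into m_pUnused presumably keeps these otherwise-unreferenced dump symbols
// from being stripped, so they remain callable from a debugger.)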
void (__thiscall Module::*pfnModuleDump)() const = &Module::dump; void (__thiscall Type::*pfnTypeDump)() const = &Type::dump; m_pUnused = (char *)&pfnModuleDump - (char *)&pfnTypeDump; } HLModule::~HLModule() { if (m_pModule->pfnRemoveGlobal == &HLModule_RemoveGlobal) m_pModule->pfnRemoveGlobal = nullptr; } LLVMContext &HLModule::GetCtx() const { return m_Ctx; } Module *HLModule::GetModule() const { return m_pModule; } OP *HLModule::GetOP() const { return m_pOP.get(); } void HLModule::SetValidatorVersion(unsigned ValMajor, unsigned ValMinor) { m_ValMajor = ValMajor; m_ValMinor = ValMinor; } void HLModule::GetValidatorVersion(unsigned &ValMajor, unsigned &ValMinor) const { ValMajor = m_ValMajor; ValMinor = m_ValMinor; } void HLModule::SetShaderModel(const ShaderModel *pSM) { DXASSERT(m_pSM == nullptr, "shader model must not change for the module"); DXASSERT(pSM != nullptr && pSM->IsValidForDxil(), "shader model must be valid"); m_pSM = pSM; m_pSM->GetDxilVersion(m_DxilMajor, m_DxilMinor); m_pMDHelper->SetShaderModel(m_pSM); m_SerializedRootSignature.clear(); } const ShaderModel *HLModule::GetShaderModel() const { return m_pSM; } uint32_t HLOptions::GetHLOptionsRaw() const { union Cast { Cast(const HLOptions &options) { hlOptions = options; } HLOptions hlOptions; uint32_t rawData; }; static_assert(sizeof(uint32_t) == sizeof(HLOptions), "size must match to make sure no undefined bits when cast"); Cast rawCast(*this); return rawCast.rawData; } void HLOptions::SetHLOptionsRaw(uint32_t data) { union Cast { Cast(uint32_t data) { rawData = data; } HLOptions hlOptions; uint64_t rawData; }; Cast rawCast(data); *this = rawCast.hlOptions; } void HLModule::SetHLOptions(HLOptions &opts) { m_Options = opts; } const HLOptions &HLModule::GetHLOptions() const { return m_Options; } void HLModule::SetAutoBindingSpace(uint32_t Space) { m_AutoBindingSpace = Space; } uint32_t HLModule::GetAutoBindingSpace() const { return m_AutoBindingSpace; } Function *HLModule::GetEntryFunction() const { return m_pEntryFunc; } Function *HLModule::GetPatchConstantFunction() { if (!m_pSM->IsHS()) return nullptr; if (!m_pEntryFunc) return nullptr; DxilFunctionProps &funcProps = GetDxilFunctionProps(m_pEntryFunc); return funcProps.ShaderProps.HS.patchConstantFunc; } void HLModule::SetEntryFunction(Function *pEntryFunc) { m_pEntryFunc = pEntryFunc; } const string &HLModule::GetEntryFunctionName() const { return m_EntryName; } void HLModule::SetEntryFunctionName(const string &name) { m_EntryName = name; } template <typename T> unsigned HLModule::AddResource(vector<unique_ptr<T>> &Vec, unique_ptr<T> pRes) { DXASSERT_NOMSG((unsigned)Vec.size() < UINT_MAX); unsigned Id = (unsigned)Vec.size(); Vec.emplace_back(std::move(pRes)); return Id; } unsigned HLModule::AddCBuffer(unique_ptr<DxilCBuffer> pCBuffer) { return AddResource<DxilCBuffer>(m_CBuffers, std::move(pCBuffer)); } DxilCBuffer &HLModule::GetCBuffer(unsigned idx) { return *m_CBuffers[idx]; } const DxilCBuffer &HLModule::GetCBuffer(unsigned idx) const { return *m_CBuffers[idx]; } const vector<unique_ptr<DxilCBuffer>> &HLModule::GetCBuffers() const { return m_CBuffers; } unsigned HLModule::AddSampler(unique_ptr<DxilSampler> pSampler) { return AddResource<DxilSampler>(m_Samplers, std::move(pSampler)); } DxilSampler &HLModule::GetSampler(unsigned idx) { return *m_Samplers[idx]; } const DxilSampler &HLModule::GetSampler(unsigned idx) const { return *m_Samplers[idx]; } const vector<unique_ptr<DxilSampler>> &HLModule::GetSamplers() const { return m_Samplers; } unsigned 
HLModule::AddSRV(unique_ptr<HLResource> pSRV) { return AddResource<HLResource>(m_SRVs, std::move(pSRV)); } HLResource &HLModule::GetSRV(unsigned idx) { return *m_SRVs[idx]; } const HLResource &HLModule::GetSRV(unsigned idx) const { return *m_SRVs[idx]; } const vector<unique_ptr<HLResource>> &HLModule::GetSRVs() const { return m_SRVs; } unsigned HLModule::AddUAV(unique_ptr<HLResource> pUAV) { return AddResource<HLResource>(m_UAVs, std::move(pUAV)); } HLResource &HLModule::GetUAV(unsigned idx) { return *m_UAVs[idx]; } const HLResource &HLModule::GetUAV(unsigned idx) const { return *m_UAVs[idx]; } const vector<unique_ptr<HLResource>> &HLModule::GetUAVs() const { return m_UAVs; } void HLModule::RemoveFunction(llvm::Function *F) { DXASSERT_NOMSG(F != nullptr); m_DxilFunctionPropsMap.erase(F); if (m_pTypeSystem.get()->GetFunctionAnnotation(F)) m_pTypeSystem.get()->EraseFunctionAnnotation(F); m_pOP->RemoveFunction(F); } namespace { template <typename TResource> bool RemoveResource(std::vector<std::unique_ptr<TResource>> &vec, GlobalVariable *pVariable, bool keepAllocated) { for (auto p = vec.begin(), e = vec.end(); p != e; ++p) { if ((*p)->GetGlobalSymbol() != pVariable) continue; if (keepAllocated && (*p)->IsAllocated()) { // Keep the resource, but it has no more symbol. (*p)->SetGlobalSymbol(UndefValue::get(pVariable->getType())); } else { // Erase the resource altogether and update IDs of subsequent ones p = vec.erase(p); for (e = vec.end(); p != e; ++p) { unsigned ID = (*p)->GetID() - 1; (*p)->SetID(ID); } } return true; } return false; } } // namespace void HLModule::RemoveGlobal(llvm::GlobalVariable *GV) { DXASSERT_NOMSG(GV != nullptr); // With legacy resource reservation, we must keep unused resources around // when they have a register allocation because they prevent that // register range from being allocated to other resources. bool keepAllocated = GetHLOptions().bLegacyResourceReservation; // This could be considerably faster - check variable type to see which // resource type this is rather than scanning all lists, and look for // usage and removal patterns. if (RemoveResource(m_CBuffers, GV, keepAllocated)) return; if (RemoveResource(m_SRVs, GV, keepAllocated)) return; if (RemoveResource(m_UAVs, GV, keepAllocated)) return; if (RemoveResource(m_Samplers, GV, keepAllocated)) return; // TODO: do m_TGSMVariables and m_StreamOutputs need maintenance?
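// Illustrative example (hypothetical HLSL, not from this file): with
// bLegacyResourceReservation, an unused "Texture2D tex : register(t3);" is
// kept alive (with its symbol replaced by undef) so register t3 stays
// reserved; otherwise the entry is erased and subsequent resource IDs are
// decremented, per RemoveResource above.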
} HLModule::tgsm_iterator HLModule::tgsm_begin() { return m_TGSMVariables.begin(); } HLModule::tgsm_iterator HLModule::tgsm_end() { return m_TGSMVariables.end(); } void HLModule::AddGroupSharedVariable(GlobalVariable *GV) { m_TGSMVariables.emplace_back(GV); } std::vector<uint8_t> &HLModule::GetSerializedRootSignature() { return m_SerializedRootSignature; } void HLModule::SetSerializedRootSignature(const uint8_t *pData, unsigned size) { m_SerializedRootSignature.assign(pData, pData + size); } DxilTypeSystem &HLModule::GetTypeSystem() { return *m_pTypeSystem; } DxilTypeSystem *HLModule::ReleaseTypeSystem() { return m_pTypeSystem.release(); } hlsl::OP *HLModule::ReleaseOP() { return m_pOP.release(); } DxilFunctionPropsMap &&HLModule::ReleaseFunctionPropsMap() { return std::move(m_DxilFunctionPropsMap); } void HLModule::EmitLLVMUsed() { if (m_LLVMUsed.empty()) return; vector<llvm::Constant *> GVs; GVs.resize(m_LLVMUsed.size()); for (size_t i = 0, e = m_LLVMUsed.size(); i != e; i++) { GVs[i] = ConstantExpr::getAddrSpaceCast( cast<llvm::Constant>(&*m_LLVMUsed[i]), Type::getInt8PtrTy(m_Ctx)); } ArrayType *pATy = ArrayType::get(Type::getInt8PtrTy(m_Ctx), GVs.size()); GlobalVariable *pGV = new GlobalVariable(*m_pModule, pATy, false, GlobalValue::AppendingLinkage, ConstantArray::get(pATy, GVs), "llvm.used"); pGV->setSection("llvm.metadata"); } vector<GlobalVariable *> &HLModule::GetLLVMUsed() { return m_LLVMUsed; } bool HLModule::HasDxilFunctionProps(llvm::Function *F) { return m_DxilFunctionPropsMap.find(F) != m_DxilFunctionPropsMap.end(); } DxilFunctionProps &HLModule::GetDxilFunctionProps(llvm::Function *F) { DXASSERT(m_DxilFunctionPropsMap.count(F) != 0, "cannot find F in map"); return *m_DxilFunctionPropsMap[F]; } void HLModule::AddDxilFunctionProps(llvm::Function *F, std::unique_ptr<DxilFunctionProps> &info) { DXASSERT(m_DxilFunctionPropsMap.count(F) == 0, "F already in map, info will be overwritten"); DXASSERT_NOMSG(info->shaderKind != DXIL::ShaderKind::Invalid); m_DxilFunctionPropsMap[F] = std::move(info); } void HLModule::SetPatchConstantFunctionForHS( llvm::Function *hullShaderFunc, llvm::Function *patchConstantFunc) { auto propIter = m_DxilFunctionPropsMap.find(hullShaderFunc); DXASSERT(propIter != m_DxilFunctionPropsMap.end(), "else Hull Shader missing function props"); DxilFunctionProps &props = *(propIter->second); DXASSERT(props.IsHS(), "else hullShaderFunc is not a Hull Shader"); if (props.ShaderProps.HS.patchConstantFunc) m_PatchConstantFunctions.erase(props.ShaderProps.HS.patchConstantFunc); props.ShaderProps.HS.patchConstantFunc = patchConstantFunc; if (patchConstantFunc) m_PatchConstantFunctions.insert(patchConstantFunc); } bool HLModule::IsGraphicsShader(llvm::Function *F) { return HasDxilFunctionProps(F) && GetDxilFunctionProps(F).IsGraphics(); } bool HLModule::IsPatchConstantShader(llvm::Function *F) { return m_PatchConstantFunctions.count(F) != 0; } bool HLModule::IsComputeShader(llvm::Function *F) { return HasDxilFunctionProps(F) && GetDxilFunctionProps(F).IsCS(); } bool HLModule::IsNodeShader(llvm::Function *F) { return HasDxilFunctionProps(F) && GetDxilFunctionProps(F).IsNode(); } bool HLModule::IsEntryThatUsesSignatures(llvm::Function *F) { auto propIter = m_DxilFunctionPropsMap.find(F); if (propIter != m_DxilFunctionPropsMap.end()) { DxilFunctionProps &props = *(propIter->second); return props.IsGraphics() || props.IsCS() || props.IsNode(); } // Otherwise, return true if patch constant function return IsPatchConstantShader(F); } bool HLModule::IsEntry(llvm::Function *F) 
{ auto propIter = m_DxilFunctionPropsMap.find(F); if (propIter != m_DxilFunctionPropsMap.end()) { DXASSERT(propIter->second->shaderKind != DXIL::ShaderKind::Invalid, "invalid entry props"); return true; } // Otherwise, return true if patch constant function return IsPatchConstantShader(F); } DxilFunctionAnnotation *HLModule::GetFunctionAnnotation(llvm::Function *F) { return m_pTypeSystem->GetFunctionAnnotation(F); } DxilFunctionAnnotation *HLModule::AddFunctionAnnotation(llvm::Function *F) { DXASSERT(m_pTypeSystem->GetFunctionAnnotation(F) == nullptr, "function annotation already exist"); return m_pTypeSystem->AddFunctionAnnotation(F); } DXIL::Float32DenormMode HLModule::GetFloat32DenormMode() const { return m_Float32DenormMode; } void HLModule::SetFloat32DenormMode(const DXIL::Float32DenormMode mode) { m_Float32DenormMode = mode; } DXIL::DefaultLinkage HLModule::GetDefaultLinkage() const { return m_DefaultLinkage; } void HLModule::SetDefaultLinkage(const DXIL::DefaultLinkage linkage) { m_DefaultLinkage = linkage; } static const StringRef kHLDxilFunctionPropertiesMDName = "dx.fnprops"; static const StringRef kHLDxilOptionsMDName = "dx.options"; // DXIL metadata serialization/deserialization. void HLModule::EmitHLMetadata() { m_pMDHelper->EmitDxilVersion(m_DxilMajor, m_DxilMinor); m_pMDHelper->EmitValidatorVersion(m_ValMajor, m_ValMinor); m_pMDHelper->EmitDxilShaderModel(m_pSM); MDTuple *pMDResources = EmitHLResources(); MDTuple *pMDProperties = EmitHLShaderProperties(); m_pMDHelper->EmitDxilTypeSystem(GetTypeSystem(), m_LLVMUsed); EmitLLVMUsed(); MDTuple *const pNullMDSig = nullptr; MDTuple *pEntry = m_pMDHelper->EmitDxilEntryPointTuple( GetEntryFunction(), m_EntryName, pNullMDSig, pMDResources, pMDProperties); vector<MDNode *> Entries; Entries.emplace_back(pEntry); m_pMDHelper->EmitDxilEntryPoints(Entries); { NamedMDNode *fnProps = m_pModule->getOrInsertNamedMetadata(kHLDxilFunctionPropertiesMDName); for (auto &&pair : m_DxilFunctionPropsMap) { const hlsl::DxilFunctionProps *props = pair.second.get(); MDTuple *pProps = m_pMDHelper->EmitDxilFunctionProps(props, pair.first); fnProps->addOperand(pProps); } NamedMDNode *options = m_pModule->getOrInsertNamedMetadata(kHLDxilOptionsMDName); uint32_t hlOptions = m_Options.GetHLOptionsRaw(); options->addOperand( MDNode::get(m_Ctx, m_pMDHelper->Uint32ToConstMD(hlOptions))); options->addOperand(MDNode::get( m_Ctx, m_pMDHelper->Uint32ToConstMD(GetAutoBindingSpace()))); } if (!m_SerializedRootSignature.empty()) { m_pMDHelper->EmitRootSignature(m_SerializedRootSignature); } // Save Subobjects if (GetSubobjects()) { m_pMDHelper->EmitSubobjects(*GetSubobjects()); } } void HLModule::LoadHLMetadata() { m_pMDHelper->LoadDxilVersion(m_DxilMajor, m_DxilMinor); m_pMDHelper->LoadValidatorVersion(m_ValMajor, m_ValMinor); m_pMDHelper->LoadDxilShaderModel(m_pSM); m_SerializedRootSignature.clear(); const llvm::NamedMDNode *pEntries = m_pMDHelper->GetDxilEntryPoints(); Function *pEntryFunc; string EntryName; const llvm::MDOperand *pSignatures, *pResources, *pProperties; m_pMDHelper->GetDxilEntryPoint(pEntries->getOperand(0), pEntryFunc, EntryName, pSignatures, pResources, pProperties); SetEntryFunction(pEntryFunc); SetEntryFunctionName(EntryName); LoadHLResources(*pResources); LoadHLShaderProperties(*pProperties); m_pMDHelper->LoadDxilTypeSystem(*m_pTypeSystem.get()); { NamedMDNode *fnProps = m_pModule->getNamedMetadata(kHLDxilFunctionPropertiesMDName); size_t propIdx = 0; while (propIdx < fnProps->getNumOperands()) { MDTuple *pProps = 
dyn_cast<MDTuple>(fnProps->getOperand(propIdx++)); std::unique_ptr<hlsl::DxilFunctionProps> props = llvm::make_unique<hlsl::DxilFunctionProps>(); const Function *F = m_pMDHelper->LoadDxilFunctionProps(pProps, props.get()); if (props->IsHS() && props->ShaderProps.HS.patchConstantFunc) { // Add patch constant function to m_PatchConstantFunctions m_PatchConstantFunctions.insert( props->ShaderProps.HS.patchConstantFunc); } m_DxilFunctionPropsMap[F] = std::move(props); } const NamedMDNode *options = m_pModule->getOrInsertNamedMetadata(kHLDxilOptionsMDName); const MDNode *MDOptions = options->getOperand(0); m_Options.SetHLOptionsRaw( DxilMDHelper::ConstMDToUint32(MDOptions->getOperand(0))); if (options->getNumOperands() > 1) SetAutoBindingSpace( DxilMDHelper::ConstMDToUint32(options->getOperand(1)->getOperand(0))); } m_pOP->InitWithMinPrecision(m_Options.bUseMinPrecision); m_pMDHelper->LoadRootSignature(m_SerializedRootSignature); // Load Subobjects std::unique_ptr<DxilSubobjects> pSubobjects(new DxilSubobjects()); m_pMDHelper->LoadSubobjects(*pSubobjects); if (pSubobjects->GetSubobjects().size()) { ResetSubobjects(pSubobjects.release()); } } void HLModule::ClearHLMetadata(llvm::Module &M) { Module::named_metadata_iterator b = M.named_metadata_begin(), e = M.named_metadata_end(); SmallVector<NamedMDNode *, 8> nodes; for (; b != e; ++b) { StringRef name = b->getName(); if (name == DxilMDHelper::kDxilVersionMDName || name == DxilMDHelper::kDxilShaderModelMDName || name == DxilMDHelper::kDxilEntryPointsMDName || name == DxilMDHelper::kDxilRootSignatureMDName || name == DxilMDHelper::kDxilResourcesMDName || name == DxilMDHelper::kDxilTypeSystemMDName || name == DxilMDHelper::kDxilValidatorVersionMDName || name == kHLDxilFunctionPropertiesMDName || // TODO: adjust to proper name name == kHLDxilOptionsMDName || name.startswith(DxilMDHelper::kDxilTypeSystemHelperVariablePrefix)) { nodes.push_back(b); } } for (size_t i = 0; i < nodes.size(); ++i) { M.eraseNamedMetadata(nodes[i]); } } MDTuple *HLModule::EmitHLResources() { // Emit SRV records. MDTuple *pTupleSRVs = nullptr; if (!m_SRVs.empty()) { vector<Metadata *> MDVals; for (size_t i = 0; i < m_SRVs.size(); i++) { MDVals.emplace_back(m_pMDHelper->EmitDxilSRV(*m_SRVs[i])); } pTupleSRVs = MDNode::get(m_Ctx, MDVals); } // Emit UAV records. MDTuple *pTupleUAVs = nullptr; if (!m_UAVs.empty()) { vector<Metadata *> MDVals; for (size_t i = 0; i < m_UAVs.size(); i++) { MDVals.emplace_back(m_pMDHelper->EmitDxilUAV(*m_UAVs[i])); } pTupleUAVs = MDNode::get(m_Ctx, MDVals); } // Emit CBuffer records. MDTuple *pTupleCBuffers = nullptr; if (!m_CBuffers.empty()) { vector<Metadata *> MDVals; for (size_t i = 0; i < m_CBuffers.size(); i++) { MDVals.emplace_back(m_pMDHelper->EmitDxilCBuffer(*m_CBuffers[i])); } pTupleCBuffers = MDNode::get(m_Ctx, MDVals); } // Emit Sampler records. 
MDTuple *pTupleSamplers = nullptr; if (!m_Samplers.empty()) { vector<Metadata *> MDVals; for (size_t i = 0; i < m_Samplers.size(); i++) { MDVals.emplace_back(m_pMDHelper->EmitDxilSampler(*m_Samplers[i])); } pTupleSamplers = MDNode::get(m_Ctx, MDVals); } if (pTupleSRVs != nullptr || pTupleUAVs != nullptr || pTupleCBuffers != nullptr || pTupleSamplers != nullptr) { return m_pMDHelper->EmitDxilResourceTuple(pTupleSRVs, pTupleUAVs, pTupleCBuffers, pTupleSamplers); } else { return nullptr; } } void HLModule::LoadHLResources(const llvm::MDOperand &MDO) { const llvm::MDTuple *pSRVs, *pUAVs, *pCBuffers, *pSamplers; m_pMDHelper->GetDxilResources(MDO, pSRVs, pUAVs, pCBuffers, pSamplers); // Load SRV records. if (pSRVs != nullptr) { for (unsigned i = 0; i < pSRVs->getNumOperands(); i++) { unique_ptr<HLResource> pSRV(new HLResource); m_pMDHelper->LoadDxilSRV(pSRVs->getOperand(i), *pSRV); AddSRV(std::move(pSRV)); } } // Load UAV records. if (pUAVs != nullptr) { for (unsigned i = 0; i < pUAVs->getNumOperands(); i++) { unique_ptr<HLResource> pUAV(new HLResource); m_pMDHelper->LoadDxilUAV(pUAVs->getOperand(i), *pUAV); AddUAV(std::move(pUAV)); } } // Load CBuffer records. if (pCBuffers != nullptr) { for (unsigned i = 0; i < pCBuffers->getNumOperands(); i++) { unique_ptr<DxilCBuffer> pCB = llvm::make_unique<DxilCBuffer>(); m_pMDHelper->LoadDxilCBuffer(pCBuffers->getOperand(i), *pCB); AddCBuffer(std::move(pCB)); } } // Load Sampler records. if (pSamplers != nullptr) { for (unsigned i = 0; i < pSamplers->getNumOperands(); i++) { unique_ptr<DxilSampler> pSampler(new DxilSampler); m_pMDHelper->LoadDxilSampler(pSamplers->getOperand(i), *pSampler); AddSampler(std::move(pSampler)); } } } MDTuple *HLModule::EmitHLShaderProperties() { return nullptr; } void HLModule::LoadHLShaderProperties(const MDOperand &MDO) { return; } DxilResourceBase * HLModule::AddResourceWithGlobalVariableAndProps(llvm::Constant *GV, DxilResourceProperties &RP) { DxilResource::Class RC = RP.getResourceClass(); DxilResource::Kind RK = RP.getResourceKind(); unsigned rangeSize = 1; Type *Ty = GV->getType()->getPointerElementType(); if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) rangeSize = AT->getNumElements(); DxilResourceBase *R = nullptr; switch (RC) { case DxilResource::Class::Sampler: { std::unique_ptr<DxilSampler> S = llvm::make_unique<DxilSampler>(); if (RP.Basic.SamplerCmpOrHasCounter) S->SetSamplerKind(DxilSampler::SamplerKind::Comparison); else S->SetSamplerKind(DxilSampler::SamplerKind::Default); S->SetKind(RK); S->SetGlobalSymbol(GV); S->SetGlobalName(GV->getName()); S->SetRangeSize(rangeSize); R = S.get(); AddSampler(std::move(S)); } break; case DxilResource::Class::SRV: { std::unique_ptr<HLResource> Res = llvm::make_unique<HLResource>(); if (DXIL::IsTyped(RP.getResourceKind())) { Res->SetCompType(RP.Typed.CompType); } else if (DXIL::IsStructuredBuffer(RK)) { Res->SetElementStride(RP.StructStrideInBytes); } Res->SetRW(false); Res->SetKind(RK); Res->SetGlobalSymbol(GV); Res->SetGlobalName(GV->getName()); Res->SetRangeSize(rangeSize); R = Res.get(); AddSRV(std::move(Res)); } break; case DxilResource::Class::UAV: { std::unique_ptr<HLResource> Res = llvm::make_unique<HLResource>(); if (DXIL::IsTyped(RK)) { Res->SetCompType(RP.Typed.CompType); } else if (DXIL::IsStructuredBuffer(RK)) { Res->SetElementStride(RP.StructStrideInBytes); } Res->SetRW(true); Res->SetROV(RP.Basic.IsROV); Res->SetGloballyCoherent(RP.Basic.IsGloballyCoherent); Res->SetHasCounter(RP.Basic.SamplerCmpOrHasCounter); Res->SetKind(RK); Res->SetGlobalSymbol(GV); 
Res->SetGlobalName(GV->getName()); Res->SetRangeSize(rangeSize); R = Res.get(); AddUAV(std::move(Res)); } break; default: DXASSERT(0, "Invalid metadata for AddResourceWithGlobalVariableAndMDNode"); } return R; } static uint64_t getRegBindingKey(unsigned CbID, unsigned ConstantIdx) { return (uint64_t)(CbID) << 32 | ConstantIdx; } void HLModule::AddRegBinding(unsigned CbID, unsigned ConstantIdx, unsigned Srv, unsigned Uav, unsigned Sampler) { uint64_t Key = getRegBindingKey(CbID, ConstantIdx); m_SrvBindingInCB[Key] = Srv; m_UavBindingInCB[Key] = Uav; m_SamplerBindingInCB[Key] = Sampler; } // Helper functions for resources in a cbuffer. namespace { DXIL::ResourceClass GetRCFromType(StructType *ST, Module &M) { for (Function &F : M.functions()) { if (F.user_empty()) continue; hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(&F); if (group != HLOpcodeGroup::HLAnnotateHandle) continue; Type *Ty = F.getFunctionType()->getParamType( HLOperandIndex::kAnnotateHandleResourceTypeOpIdx); if (Ty != ST) continue; CallInst *CI = cast<CallInst>(F.user_back()); Constant *Props = cast<Constant>(CI->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx)); DxilResourceProperties RP = resource_helper::loadPropsFromConstant(*Props); return RP.getResourceClass(); } return DXIL::ResourceClass::Invalid; } unsigned CountResNum(Module &M, Type *Ty, DXIL::ResourceClass RC) { // Count num of RCs. unsigned ArraySize = 1; while (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { ArraySize *= AT->getNumElements(); Ty = AT->getElementType(); } if (!Ty->isAggregateType()) return 0; StructType *ST = dyn_cast<StructType>(Ty); DXIL::ResourceClass TmpRC = GetRCFromType(ST, M); if (TmpRC == RC) return ArraySize; unsigned Size = 0; for (Type *EltTy : ST->elements()) { Size += CountResNum(M, EltTy, RC); } return Size * ArraySize; } // Note: the rule for register binding on a struct array is like this: // struct X { // Texture2D x; // SamplerState s; // Texture2D y; // }; // X x[2] : register(t3) : register(s3); // x[0].x t3 // x[0].s s3 // x[0].y t4 // x[1].x t5 // x[1].s s4 // x[1].y t6 // So x[0].x and x[1].x are not in an array. unsigned CalcRegBinding(gep_type_iterator GEPIt, gep_type_iterator E, Module &M, DXIL::ResourceClass RC) { unsigned NumRC = 0; // Walk the GEP indices, counting only resources of class RC. for (; GEPIt != E; GEPIt++) { Type *Ty = *GEPIt; Value *idx = GEPIt.getOperand(); Constant *constIdx = dyn_cast<Constant>(idx); unsigned immIdx = constIdx->getUniqueInteger().getLimitedValue(); // Dynamic indexing is not supported. // Arrays should be plain 1d resource arrays, as with global resources. if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { NumRC += immIdx * CountResNum(M, AT->getElementType(), RC); } else if (StructType *ST = dyn_cast<StructType>(Ty)) { for (unsigned i = 0; i < immIdx; i++) { NumRC += CountResNum(M, ST->getElementType(i), RC); } } } return NumRC; } } // namespace unsigned HLModule::GetBindingForResourceInCB(GetElementPtrInst *CbPtr, GlobalVariable *CbGV, DXIL::ResourceClass RC) { if (!CbPtr->hasAllConstantIndices()) { // Dynamic indexing of a resource array inside a cbuffer is not supported. string ErrorMsg( "Index for resource array inside cbuffer must be a literal expression"); dxilutil::EmitErrorOnInstruction(CbPtr, ErrorMsg); return UINT_MAX; } Module &M = *m_pModule; unsigned RegBinding = UINT_MAX; for (auto &CB : m_CBuffers) { if (CbGV != CB->GetGlobalSymbol()) continue; gep_type_iterator GEPIt = gep_type_begin(CbPtr), E = gep_type_end(CbPtr); // The pointer index.
GEPIt++; unsigned ID = CB->GetID(); unsigned idx = cast<ConstantInt>(GEPIt.getOperand())->getLimitedValue(); // The first level index to get current constant. GEPIt++; uint64_t Key = getRegBindingKey(ID, idx); switch (RC) { default: break; case DXIL::ResourceClass::SRV: if (m_SrvBindingInCB.count(Key)) RegBinding = m_SrvBindingInCB[Key]; break; case DXIL::ResourceClass::UAV: if (m_UavBindingInCB.count(Key)) RegBinding = m_UavBindingInCB[Key]; break; case DXIL::ResourceClass::Sampler: if (m_SamplerBindingInCB.count(Key)) RegBinding = m_SamplerBindingInCB[Key]; break; } if (RegBinding == UINT_MAX) break; // Calc RegBinding. RegBinding += CalcRegBinding(GEPIt, E, M, RC); break; } return RegBinding; } // TODO: Don't check names. bool HLModule::IsStreamOutputType(llvm::Type *Ty) { if (StructType *ST = dyn_cast<StructType>(Ty)) { StringRef name = ST->getName(); if (name.startswith("class.PointStream")) return true; if (name.startswith("class.LineStream")) return true; if (name.startswith("class.TriangleStream")) return true; } return false; } bool HLModule::IsStreamOutputPtrType(llvm::Type *Ty) { if (!Ty->isPointerTy()) return false; Ty = Ty->getPointerElementType(); return IsStreamOutputType(Ty); } void HLModule::GetParameterRowsAndCols( Type *Ty, unsigned &rows, unsigned &cols, DxilParameterAnnotation &paramAnnotation) { if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); // For array input of HS, DS, GS, // we need to skip the first level which size is based on primitive type. DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual(); bool skipOneLevelArray = inputQual == DxilParamInputQual::InputPatch; skipOneLevelArray |= inputQual == DxilParamInputQual::OutputPatch; skipOneLevelArray |= inputQual == DxilParamInputQual::InputPrimitive; skipOneLevelArray |= inputQual == DxilParamInputQual::OutVertices; skipOneLevelArray |= inputQual == DxilParamInputQual::OutPrimitives; if (skipOneLevelArray) { if (Ty->isArrayTy()) Ty = Ty->getArrayElementType(); } unsigned arraySize = 1; while (Ty->isArrayTy()) { arraySize *= Ty->getArrayNumElements(); Ty = Ty->getArrayElementType(); } rows = 1; cols = 1; if (paramAnnotation.HasMatrixAnnotation()) { const DxilMatrixAnnotation &matrix = paramAnnotation.GetMatrixAnnotation(); if (matrix.Orientation == MatrixOrientation::RowMajor) { rows = matrix.Rows; cols = matrix.Cols; } else { DXASSERT_NOMSG(matrix.Orientation == MatrixOrientation::ColumnMajor); cols = matrix.Rows; rows = matrix.Cols; } } else if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) cols = VT->getNumElements(); rows *= arraySize; } llvm::Function *HLModule::GetHLOperationFunction( HLOpcodeGroup group, unsigned opcode, llvm::Type *RetType, llvm::ArrayRef<llvm::Value *> paramList, llvm::Module &M) { SmallVector<llvm::Type *, 4> paramTyList; // Add the opcode param llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext()); paramTyList.emplace_back(opcodeTy); for (Value *param : paramList) { paramTyList.emplace_back(param->getType()); } llvm::FunctionType *funcTy = llvm::FunctionType::get(RetType, paramTyList, false); Function *opFunc = GetOrCreateHLFunction(M, funcTy, group, opcode); return opFunc; } template CallInst *HLModule::EmitHLOperationCall(IRBuilder<> &Builder, HLOpcodeGroup group, unsigned opcode, Type *RetType, ArrayRef<Value *> paramList, llvm::Module &M); template <typename BuilderTy> CallInst *HLModule::EmitHLOperationCall(BuilderTy &Builder, HLOpcodeGroup group, unsigned opcode, Type *RetType, ArrayRef<Value *> paramList, llvm::Module &M) { // Add the opcode 
param llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext()); Function *opFunc = GetHLOperationFunction(group, opcode, RetType, paramList, M); SmallVector<Value *, 4> opcodeParamList; Value *opcodeConst = Constant::getIntegerValue(opcodeTy, APInt(32, opcode)); opcodeParamList.emplace_back(opcodeConst); opcodeParamList.append(paramList.begin(), paramList.end()); return Builder.CreateCall(opFunc, opcodeParamList); } unsigned HLModule::GetNumericCastOp(llvm::Type *SrcTy, bool SrcIsUnsigned, llvm::Type *DstTy, bool DstIsUnsigned) { DXASSERT(SrcTy != DstTy, "No-op conversions are not casts and should have " "been handled by the callee."); uint32_t SrcBitSize = SrcTy->getScalarSizeInBits(); uint32_t DstBitSize = DstTy->getScalarSizeInBits(); bool SrcIsInt = SrcTy->isIntOrIntVectorTy(); bool DstIsInt = DstTy->isIntOrIntVectorTy(); DXASSERT(DstBitSize != 1, "Conversions to bool are not a cast and should " "have been handled by the callee."); // Conversions from bools are like unsigned integer widening if (SrcBitSize == 1) SrcIsUnsigned = true; if (SrcIsInt) { if (DstIsInt) { // int to int if (SrcBitSize > DstBitSize) return Instruction::Trunc; // unsigned to unsigned: zext // unsigned to signed: zext (fully representable) // signed to signed: sext // signed to unsigned: sext (like C++) return SrcIsUnsigned ? Instruction::ZExt : Instruction::SExt; } else { // int to float return SrcIsUnsigned ? Instruction::UIToFP : Instruction::SIToFP; } } else { if (DstIsInt) { // float to int return DstIsUnsigned ? Instruction::FPToUI : Instruction::FPToSI; } else { // float to float return SrcBitSize > DstBitSize ? Instruction::FPTrunc : Instruction::FPExt; } } } bool HLModule::HasPreciseAttributeWithMetadata(Instruction *I) { return DxilMDHelper::IsMarkedPrecise(I); } void HLModule::MarkPreciseAttributeWithMetadata(Instruction *I) { return DxilMDHelper::MarkPrecise(I); } void HLModule::ClearPreciseAttributeWithMetadata(Instruction *I) { I->setMetadata(DxilMDHelper::kDxilPreciseAttributeMDName, nullptr); } static void MarkPreciseAttribute(Function *F) { LLVMContext &Ctx = F->getContext(); MDNode *preciseNode = MDNode::get( Ctx, {MDString::get(Ctx, DxilMDHelper::kDxilPreciseAttributeMDName)}); F->setMetadata(DxilMDHelper::kDxilPreciseAttributeMDName, preciseNode); } template <typename BuilderTy> void HLModule::MarkPreciseAttributeOnValWithFunctionCall(llvm::Value *V, BuilderTy &Builder, llvm::Module &M) { Type *Ty = V->getType(); Type *EltTy = Ty->getScalarType(); // TODO: Only do this on basic types. FunctionType *preciseFuncTy = FunctionType::get(Type::getVoidTy(M.getContext()), {EltTy}, false); // The function will be deleted after precise propagate. std::string preciseFuncName = "dx.attribute.precise."; raw_string_ostream mangledNameStr(preciseFuncName); EltTy->print(mangledNameStr); mangledNameStr.flush(); Function *preciseFunc = cast<Function>(M.getOrInsertFunction(preciseFuncName, preciseFuncTy)); if (!HLModule::HasPreciseAttribute(preciseFunc)) MarkPreciseAttribute(preciseFunc); if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) { for (unsigned i = 0; i < VT->getNumElements(); i++) { Value *Elt = Builder.CreateExtractElement(V, i); Builder.CreateCall(preciseFunc, {Elt}); } } else Builder.CreateCall(preciseFunc, {V}); } void HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(llvm::Value *Ptr, llvm::Module &M) { for (User *U : Ptr->users()) { // Skip load inst. 
if (dyn_cast<LoadInst>(U)) continue; if (StoreInst *SI = dyn_cast<StoreInst>(U)) { Value *V = SI->getValueOperand(); if (isa<Instruction>(V)) { // Mark the Value with function call. IRBuilder<> Builder(SI); MarkPreciseAttributeOnValWithFunctionCall(V, Builder, M); } } else if (CallInst *CI = dyn_cast<CallInst>(U)) { if (CI->getType()->isVoidTy()) { IRBuilder<> Builder(CI); // For void type, cannot use as function arg. // Mark all arg for it? for (auto &arg : CI->arg_operands()) { MarkPreciseAttributeOnValWithFunctionCall(arg, Builder, M); } } else { if (CI->getType()->isPointerTy()) { // For instance, matrix subscript... MarkPreciseAttributeOnPtrWithFunctionCall(CI, M); } else { IRBuilder<> Builder(CI->getNextNode()); MarkPreciseAttributeOnValWithFunctionCall(CI, Builder, M); } } } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { // Do not mark bitcasts. We only expect them here due to lifetime // intrinsics. DXASSERT(onlyUsedByLifetimeMarkers(BCI), "expected bitcast to only be used by lifetime intrinsics"); } else { // Must be GEP here. GetElementPtrInst *GEP = cast<GetElementPtrInst>(U); MarkPreciseAttributeOnPtrWithFunctionCall(GEP, M); } } } bool HLModule::HasPreciseAttribute(Function *F) { MDNode *preciseNode = F->getMetadata(DxilMDHelper::kDxilPreciseAttributeMDName); return preciseNode != nullptr; } static void AddDIGlobalVariable(DIBuilder &Builder, DIGlobalVariable *LocDIGV, StringRef Name, DIType *DITy, GlobalVariable *GV, DebugInfoFinder &DbgInfoFinder, bool removeLocDIGV) { DIGlobalVariable *EltDIGV = Builder.createGlobalVariable( LocDIGV->getScope(), Name, GV->getName(), LocDIGV->getFile(), LocDIGV->getLine(), DITy, false, GV); DICompileUnit *DICU = nullptr; std::vector<Metadata *> AllGVs; std::vector<Metadata *>::iterator locIt; for (auto itDICU : DbgInfoFinder.compile_units()) { MDTuple *GTuple = cast_or_null<MDTuple>(itDICU->getRawGlobalVariables()); if (!GTuple) continue; AllGVs.assign(GTuple->operands().begin(), GTuple->operands().end()); locIt = std::find(AllGVs.begin(), AllGVs.end(), LocDIGV); if (locIt == AllGVs.end()) continue; DICU = itDICU; break; } DXASSERT_NOMSG(DICU); if (!DICU) return; // Add global to CU. if (removeLocDIGV) { AllGVs.erase(locIt); } AllGVs.emplace_back(EltDIGV); DICU->replaceGlobalVariables(MDTuple::get(GV->getContext(), AllGVs)); DXVERIFY_NOMSG(DbgInfoFinder.appendGlobalVariable(EltDIGV)); } static unsigned GetCompositeTypeSize(DIType *Ty) { DICompositeType *StructTy = nullptr; DITypeIdentifierMap EmptyMap; if (DIDerivedType *DerivedTy = dyn_cast<DIDerivedType>(Ty)) { DXASSERT_NOMSG(DerivedTy->getTag() == dwarf::DW_TAG_const_type || DerivedTy->getTag() == dwarf::DW_TAG_typedef); DIType *BaseTy = DerivedTy->getBaseType().resolve(EmptyMap); return GetCompositeTypeSize(BaseTy); } else { StructTy = cast<DICompositeType>(Ty); } return StructTy->getSizeInBits(); } void HLModule::CreateElementGlobalVariableDebugInfo( GlobalVariable *GV, DebugInfoFinder &DbgInfoFinder, GlobalVariable *EltGV, unsigned sizeInBits, unsigned alignInBits, unsigned offsetInBits, StringRef eltName) { DIGlobalVariable *DIGV = dxilutil::FindGlobalVariableDebugInfo(GV, DbgInfoFinder); if (!DIGV) { DXASSERT(DIGV, "DIGV Parameter must be non-null"); return; } DIBuilder Builder(*GV->getParent()); DITypeIdentifierMap EmptyMap; DIType *DITy = DIGV->getType().resolve(EmptyMap); DIScope *DITyScope = DITy->getScope().resolve(EmptyMap); // If element size is greater than base size make sure we're dealing with an // empty struct. 
unsigned compositeSize = GetCompositeTypeSize(DITy); if (sizeInBits > compositeSize) { DXASSERT_NOMSG(offsetInBits == 0 && compositeSize == 8); sizeInBits = compositeSize; } // Create Elt type. DIType *EltDITy = Builder.createMemberType(DITyScope, DITy->getName().str() + eltName.str(), DITy->getFile(), DITy->getLine(), sizeInBits, alignInBits, offsetInBits, /*Flags*/ 0, DITy); AddDIGlobalVariable(Builder, DIGV, DIGV->getName().str() + eltName.str(), EltDITy, EltGV, DbgInfoFinder, /*removeDIGV*/ false); } void HLModule::UpdateGlobalVariableDebugInfo( llvm::GlobalVariable *GV, llvm::DebugInfoFinder &DbgInfoFinder, llvm::GlobalVariable *NewGV) { DIGlobalVariable *DIGV = dxilutil::FindGlobalVariableDebugInfo(GV, DbgInfoFinder); if (!DIGV) { DXASSERT(DIGV, "DIGV Parameter must be non-null"); return; } DIBuilder Builder(*GV->getParent()); DITypeIdentifierMap EmptyMap; DIType *DITy = DIGV->getType().resolve(EmptyMap); AddDIGlobalVariable(Builder, DIGV, DIGV->getName(), DITy, NewGV, DbgInfoFinder, /*removeDIGV*/ true); } DebugInfoFinder &HLModule::GetOrCreateDebugInfoFinder() { if (m_pDebugInfoFinder == nullptr) { m_pDebugInfoFinder = llvm::make_unique<llvm::DebugInfoFinder>(); m_pDebugInfoFinder->processModule(*m_pModule); } return *m_pDebugInfoFinder; } //------------------------------------------------------------------------------ // // Subobject methods. // DxilSubobjects *HLModule::GetSubobjects() { return m_pSubobjects.get(); } const DxilSubobjects *HLModule::GetSubobjects() const { return m_pSubobjects.get(); } DxilSubobjects *HLModule::ReleaseSubobjects() { return m_pSubobjects.release(); } void HLModule::ResetSubobjects(DxilSubobjects *subobjects) { m_pSubobjects.reset(subobjects); } //------------------------------------------------------------------------------ // // Signature methods. // HLExtraPropertyHelper::HLExtraPropertyHelper(llvm::Module *pModule) : DxilExtraPropertyHelper(pModule) {} void HLExtraPropertyHelper::EmitSignatureElementProperties( const DxilSignatureElement &SE, vector<Metadata *> &MDVals) {} void HLExtraPropertyHelper::LoadSignatureElementProperties( const MDOperand &MDO, DxilSignatureElement &SE) { if (MDO.get() == nullptr) return; } } // namespace hlsl namespace llvm { hlsl::HLModule &Module::GetOrCreateHLModule(bool skipInit) { std::unique_ptr<hlsl::HLModule> M; if (!HasHLModule()) { M = llvm::make_unique<hlsl::HLModule>(this); if (!skipInit) { M->LoadHLMetadata(); } SetHLModule(M.release()); } return GetHLModule(); } } // namespace llvm
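// A minimal standalone sketch (illustrative, not part of the original file) of
// the cast-selection rules implemented by HLModule::GetNumericCastOp above,
// written against a plain enum so the truth table can be exercised in
// isolation. All names here are hypothetical.
#include <cassert>
namespace example {
enum class CastOp { Trunc, ZExt, SExt, UIToFP, SIToFP, FPToUI, FPToSI, FPTrunc, FPExt };
inline CastOp PickNumericCast(unsigned srcBits, bool srcIsInt, bool srcIsUnsigned,
                              unsigned dstBits, bool dstIsInt, bool dstIsUnsigned) {
  if (srcBits == 1)
    srcIsUnsigned = true; // conversions from bool widen like unsigned ints
  if (srcIsInt) {
    if (dstIsInt) // int -> int: trunc when narrowing, else zext/sext by source signedness
      return srcBits > dstBits ? CastOp::Trunc
                               : (srcIsUnsigned ? CastOp::ZExt : CastOp::SExt);
    return srcIsUnsigned ? CastOp::UIToFP : CastOp::SIToFP; // int -> float
  }
  if (dstIsInt) // float -> int: the destination signedness decides
    return dstIsUnsigned ? CastOp::FPToUI : CastOp::FPToSI;
  return srcBits > dstBits ? CastOp::FPTrunc : CastOp::FPExt; // float -> float
}
inline void TestPickNumericCast() {
  assert(PickNumericCast(32, true, false, 16, true, false) == CastOp::Trunc);
  assert(PickNumericCast(1, true, false, 32, true, false) == CastOp::ZExt);
  assert(PickNumericCast(32, true, true, 32, false, false) == CastOp::UIToFP);
  assert(PickNumericCast(64, false, false, 32, false, false) == CastOp::FPTrunc);
}
} // namespace example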
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilConvergent.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilConvergent.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Mark convergent for hlsl. // // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Support/GenericDomTree.h" #include "llvm/Support/raw_os_ostream.h" #include "dxc/DXIL/DxilConstants.h" #include "dxc/HLSL/DxilConvergentName.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" using namespace llvm; using namespace hlsl; /////////////////////////////////////////////////////////////////////////////// // DxilConvergent. // Mark values convergent to keep sample coordinate calculations from sinking into control flow. // namespace { class DxilConvergentMark : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilConvergentMark() : ModulePass(ID) {} StringRef getPassName() const override { return "DxilConvergentMark"; } bool runOnModule(Module &M) override { if (M.HasHLModule()) { const ShaderModel *SM = M.GetHLModule().GetShaderModel(); if (!SM->IsPS() && !SM->IsLib() && (!SM->IsSM66Plus() || (!SM->IsCS() && !SM->IsMS() && !SM->IsAS()))) return false; } bool bUpdated = false; for (Function &F : M.functions()) { if (F.isDeclaration()) continue; // Compute postdominator relation. DominatorTreeBase<BasicBlock> PDR(true); PDR.recalculate(F); for (BasicBlock &bb : F.getBasicBlockList()) { for (auto it = bb.begin(); it != bb.end();) { Instruction *I = (it++); if (Value *V = FindConvergentOperand(I)) { if (PropagateConvergent(V, &F, PDR)) { // TODO: emit warning here. } bUpdated = true; } } } } return bUpdated; } private: void MarkConvergent(Value *V, IRBuilder<> &Builder, Module &M); Value *FindConvergentOperand(Instruction *I); bool PropagateConvergent(Value *V, Function *F, DominatorTreeBase<BasicBlock> &PostDom); bool PropagateConvergentImpl(Value *V, Function *F, DominatorTreeBase<BasicBlock> &PostDom, std::set<Value *> &visited); }; char DxilConvergentMark::ID = 0; void DxilConvergentMark::MarkConvergent(Value *V, IRBuilder<> &Builder, Module &M) { Type *Ty = V->getType()->getScalarType(); // Only work on vector/scalar types. 
if (Ty->isAggregateType() || Ty->isPointerTy()) return; FunctionType *FT = FunctionType::get(Ty, Ty, false); std::string str = kConvergentFunctionPrefix; raw_string_ostream os(str); Ty->print(os); os.flush(); Function *ConvF = cast<Function>(M.getOrInsertFunction(str, FT)); ConvF->addFnAttr(Attribute::AttrKind::Convergent); if (VectorType *VT = dyn_cast<VectorType>(V->getType())) { Value *ConvV = UndefValue::get(V->getType()); std::vector<ExtractElementInst *> extractList(VT->getNumElements()); for (unsigned i = 0; i < VT->getNumElements(); i++) { ExtractElementInst *EltV = cast<ExtractElementInst>(Builder.CreateExtractElement(V, i)); extractList[i] = EltV; Value *EltC = Builder.CreateCall(ConvF, {EltV}); ConvV = Builder.CreateInsertElement(ConvV, EltC, i); } V->replaceAllUsesWith(ConvV); for (ExtractElementInst *E : extractList) { E->setOperand(0, V); } } else { CallInst *ConvV = Builder.CreateCall(ConvF, {V}); V->replaceAllUsesWith(ConvV); ConvV->setOperand(0, V); } } bool DxilConvergentMark::PropagateConvergent( Value *V, Function *F, DominatorTreeBase<BasicBlock> &PostDom) { std::set<Value *> visited; return PropagateConvergentImpl(V, F, PostDom, visited); } bool DxilConvergentMark::PropagateConvergentImpl( Value *V, Function *F, DominatorTreeBase<BasicBlock> &PostDom, std::set<Value *> &visited) { // Don't go through already visited nodes. if (visited.find(V) != visited.end()) return false; // Mark as visited visited.insert(V); // Skip constant. if (isa<Constant>(V)) return false; // Skip phi which cannot sink. if (isa<PHINode>(V)) return false; if (Instruction *I = dyn_cast<Instruction>(V)) { BasicBlock *BB = I->getParent(); if (PostDom.dominates(BB, &F->getEntryBlock())) { IRBuilder<> Builder(I->getNextNode()); MarkConvergent(I, Builder, *F->getParent()); return false; } else { // Propagate to each operand of I. for (Use &U : I->operands()) { PropagateConvergentImpl(U.get(), F, PostDom, visited); } // Return true to report a warning. // TODO: static indexing cbuffer is fine. return true; } } else { IRBuilder<> EntryBuilder(F->getEntryBlock().getFirstInsertionPt()); MarkConvergent(V, EntryBuilder, *F->getParent()); return false; } } Value *DxilConvergentMark::FindConvergentOperand(Instruction *I) { if (CallInst *CI = dyn_cast<CallInst>(I)) { if (hlsl::GetHLOpcodeGroup(CI->getCalledFunction()) == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp IOP = static_cast<IntrinsicOp>(GetHLOpcode(CI)); switch (IOP) { case IntrinsicOp::IOP_ddx: case IntrinsicOp::IOP_ddx_fine: case IntrinsicOp::IOP_ddx_coarse: case IntrinsicOp::IOP_ddy: case IntrinsicOp::IOP_ddy_fine: case IntrinsicOp::IOP_ddy_coarse: return CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); case IntrinsicOp::MOP_Sample: case IntrinsicOp::MOP_SampleBias: case IntrinsicOp::MOP_SampleCmp: case IntrinsicOp::MOP_CalculateLevelOfDetail: case IntrinsicOp::MOP_CalculateLevelOfDetailUnclamped: return CI->getArgOperand(HLOperandIndex::kSampleCoordArgIndex); case IntrinsicOp::MOP_WriteSamplerFeedback: case IntrinsicOp::MOP_WriteSamplerFeedbackBias: return CI->getArgOperand( HLOperandIndex::kWriteSamplerFeedbackCoordArgIndex); default: // No other ops have convergent operands. 
break; } } } return nullptr; } } // namespace INITIALIZE_PASS(DxilConvergentMark, "hlsl-dxil-convergent-mark", "Mark convergent", false, false) ModulePass *llvm::createDxilConvergentMarkPass() { return new DxilConvergentMark(); } namespace { class DxilConvergentClear : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilConvergentClear() : ModulePass(ID) {} StringRef getPassName() const override { return "DxilConvergentClear"; } bool runOnModule(Module &M) override { std::vector<Function *> convergentList; for (Function &F : M.functions()) { if (F.getName().startswith(kConvergentFunctionPrefix)) { convergentList.emplace_back(&F); } } for (Function *F : convergentList) { ClearConvergent(F); } return convergentList.size(); } private: void ClearConvergent(Function *F); }; char DxilConvergentClear::ID = 0; void DxilConvergentClear::ClearConvergent(Function *F) { // Replace all users with arg. for (auto it = F->user_begin(); it != F->user_end();) { CallInst *CI = cast<CallInst>(*(it++)); Value *arg = CI->getArgOperand(0); CI->replaceAllUsesWith(arg); CI->eraseFromParent(); } F->eraseFromParent(); } } // namespace INITIALIZE_PASS(DxilConvergentClear, "hlsl-dxil-convergent-clear", "Clear convergent before dxil emit", false, false) ModulePass *llvm::createDxilConvergentClearPass() { return new DxilConvergentClear(); }
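// Illustrative sketch (not part of the pass): how a scalar value is rerouted
// through a convergent "identity" function, mirroring the scalar path of
// MarkConvergent above. The marker name below is hypothetical; the real pass
// derives it from kConvergentFunctionPrefix plus the printed type.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
static llvm::Value *wrapInConvergentMarker(llvm::Value *V, llvm::IRBuilder<> &B,
                                           llvm::Module &M) {
  llvm::Type *Ty = V->getType();
  llvm::FunctionType *FT = llvm::FunctionType::get(Ty, Ty, false);
  llvm::Function *ConvF = llvm::cast<llvm::Function>(
      M.getOrInsertFunction("example.convergent.marker", FT));
  ConvF->addFnAttr(llvm::Attribute::Convergent);
  llvm::CallInst *Marked = B.CreateCall(ConvF, {V});
  // Reroute every existing use of V through the marker call, then restore the
  // marker's own operand (replaceAllUsesWith rewrote it too), so the call
  // still consumes the original value.
  V->replaceAllUsesWith(Marked);
  Marked->setOperand(0, V);
  return Marked;
}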
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLLegalizeParameter.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLLegalizeParameter.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Legalize in parameters that have writes and out parameters that have reads. // // Must be called before the inline pass. // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLUtil.h" #include "llvm/IR/IntrinsicInst.h" #include "dxc/Support/Global.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include <vector> using namespace llvm; using namespace hlsl; // For each parameter that needs legalizing, create an alloca to replace all uses of it, and // copy between the alloca and the parameter. namespace { class HLLegalizeParameter : public ModulePass { public: static char ID; explicit HLLegalizeParameter() : ModulePass(ID) {} bool runOnModule(Module &M) override; private: void patchWriteOnInParam(Function &F, Argument &Arg, const DataLayout &DL); void patchReadOnOutParam(Function &F, Argument &Arg, const DataLayout &DL); }; AllocaInst *createAllocaForPatch(Function &F, Type *Ty) { IRBuilder<> Builder(F.getEntryBlock().getFirstInsertionPt()); return Builder.CreateAlloca(Ty); } void copyIn(AllocaInst *temp, Value *arg, CallInst *CI, unsigned size) { if (size == 0) return; // Copy arg to temp before CI. IRBuilder<> Builder(CI); Builder.CreateMemCpy(temp, arg, size, 1); } void copyOut(AllocaInst *temp, Value *arg, CallInst *CI, unsigned size) { if (size == 0) return; // Copy temp to arg after CI. IRBuilder<> Builder(CI->getNextNode()); Builder.CreateMemCpy(arg, temp, size, 1); } bool isPointerNeedToLower(Value *V, Type *HandleTy) { // CBuffer, Buffer, Texture.... // Anything related to dxil op. // hl.subscript. // Get to the root of the GEP. while (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { V = GEP->getPointerOperand(); } CallInst *CI = dyn_cast<CallInst>(V); if (!CI) { // If array of vector, we need a copy to handle vector to array in // LowerTypePasses. Type *Ty = V->getType(); if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); if (!Ty->isArrayTy()) return false; while (Ty->isArrayTy()) { Ty = Ty->getArrayElementType(); } return Ty->isVectorTy(); } HLOpcodeGroup group = GetHLOpcodeGroup(CI->getCalledFunction()); if (group != HLOpcodeGroup::HLSubscript) return false; Value *Ptr = CI->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx); // Ptr from resource handle. if (Ptr->getType() == HandleTy) return true; unsigned Opcode = GetHLOpcode(CI); // Ptr from cbuffer. if (Opcode == (unsigned)HLSubscriptOpcode::CBufferSubscript) return true; return isPointerNeedToLower(Ptr, HandleTy); } bool mayAliasWithGlobal(Value *V, CallInst *CallSite, std::vector<GlobalVariable *> &staticGVs) { // The unsafe case that needs copy-in copy-out is a global variable aliasing with a // parameter: when the global variable is updated in the function, the parameter // is updated silently. 
// Currently a copy is added for all non-const static globals in // CGMSHLSLRuntime::EmitHLSLOutParamConversionInit, // so here we just return false and do nothing. // For a case like // struct T { // float4 a[10]; //}; // static T g; // void foo(inout T t) { // // modify g //} // void bar() { // T t = g; // // No copy because t is local. // // But optimizations will change t to g later. // foo(t); //} // Optimizations which remove the copy should not replace foo(t) with foo(g) // when g could be modified. // TODO: remove the copy for globals in // CGMSHLSLRuntime::EmitHLSLOutParamConversionInit; do analysis to check aliasing and // only generate the copy when there is an alias. return false; } struct CopyData { CallInst *CallSite; Value *Arg; bool bCopyIn; bool bCopyOut; }; void ParameterCopyInCopyOut(hlsl::HLModule &HLM) { Module &M = *HLM.GetModule(); Type *HandleTy = HLM.GetOP()->GetHandleType(); const DataLayout &DL = M.getDataLayout(); std::vector<GlobalVariable *> staticGVs; for (GlobalVariable &GV : M.globals()) { if (dxilutil::IsStaticGlobal(&GV) && !GV.isConstant()) { staticGVs.emplace_back(&GV); } } SmallVector<CopyData, 4> WorkList; for (Function &F : M) { if (F.user_empty()) continue; DxilFunctionAnnotation *Annot = HLM.GetFunctionAnnotation(&F); // Skip functions that don't have annotations, including llvm intrinsics and HLOp // functions. if (!Annot) continue; bool bNoInline = F.hasFnAttribute(llvm::Attribute::NoInline) || F.isDeclaration(); for (User *U : F.users()) { CallInst *CI = dyn_cast<CallInst>(U); if (!CI) continue; for (unsigned i = 0; i < CI->getNumArgOperands(); i++) { Value *arg = CI->getArgOperand(i); if (!arg->getType()->isPointerTy()) continue; DxilParameterAnnotation &ParamAnnot = Annot->GetParameterAnnotation(i); bool bCopyIn = false; bool bCopyOut = false; switch (ParamAnnot.GetParamInputQual()) { default: break; case DxilParamInputQual::In: { bCopyIn = true; } break; case DxilParamInputQual::Out: { bCopyOut = true; } break; case DxilParamInputQual::Inout: { bCopyIn = true; bCopyOut = true; } break; } if (!bCopyIn && !bCopyOut) continue; // When a ptr from a cbuffer/buffer is used, a copy is needed to avoid lowering on the user // function. bool bNeedCopy = mayAliasWithGlobal(arg, CI, staticGVs); if (bNoInline) bNeedCopy |= isPointerNeedToLower(arg, HandleTy); if (!bNeedCopy) continue; CopyData data = {CI, arg, bCopyIn, bCopyOut}; WorkList.emplace_back(data); } } } for (CopyData &data : WorkList) { CallInst *CI = data.CallSite; Value *arg = data.Arg; Type *Ty = arg->getType()->getPointerElementType(); Type *EltTy = dxilutil::GetArrayEltTy(Ty); // Skip on object type and resource type. if (dxilutil::IsHLSLObjectType(EltTy) || dxilutil::IsHLSLResourceType(EltTy)) continue; unsigned size = DL.getTypeAllocSize(Ty); AllocaInst *temp = createAllocaForPatch(*CI->getParent()->getParent(), Ty); // TODO: Adding lifetime intrinsics isn't easy here, have to analyze uses. 
if (data.bCopyIn) copyIn(temp, arg, CI, size); if (data.bCopyOut) copyOut(temp, arg, CI, size); CI->replaceUsesOfWith(arg, temp); } } } // namespace bool HLLegalizeParameter::runOnModule(Module &M) { HLModule &HLM = M.GetOrCreateHLModule(); auto &typeSys = HLM.GetTypeSystem(); const DataLayout &DL = M.getDataLayout(); for (Function &F : M) { if (F.isDeclaration()) continue; DxilFunctionAnnotation *Annot = HLM.GetFunctionAnnotation(&F); if (!Annot) continue; for (Argument &Arg : F.args()) { if (!Arg.getType()->isPointerTy()) continue; Type *EltTy = dxilutil::GetArrayEltTy(Arg.getType()); if (dxilutil::IsHLSLObjectType(EltTy) || dxilutil::IsHLSLResourceType(EltTy)) continue; DxilParameterAnnotation &ParamAnnot = Annot->GetParameterAnnotation(Arg.getArgNo()); switch (ParamAnnot.GetParamInputQual()) { default: break; case DxilParamInputQual::In: { hlutil::PointerStatus PS(&Arg, 0, /*bLdStOnly*/ true); PS.analyze(typeSys, /*bStructElt*/ false); if (PS.HasStored()) { patchWriteOnInParam(F, Arg, DL); } } break; case DxilParamInputQual::Out: { hlutil::PointerStatus PS(&Arg, 0, /*bLdStOnly*/ true); PS.analyze(typeSys, /*bStructElt*/ false); if (PS.HasLoaded()) { patchReadOnOutParam(F, Arg, DL); } } } } } // Copy-in copy-out for ptr args when needed. ParameterCopyInCopyOut(HLM); return true; } void HLLegalizeParameter::patchWriteOnInParam(Function &F, Argument &Arg, const DataLayout &DL) { // TODO: Adding lifetime intrinsics isn't easy here, have to analyze uses. Type *Ty = Arg.getType()->getPointerElementType(); AllocaInst *temp = createAllocaForPatch(F, Ty); Arg.replaceAllUsesWith(temp); IRBuilder<> Builder(temp->getNextNode()); unsigned size = DL.getTypeAllocSize(Ty); // Copy arg to temp at the beginning of the function. Builder.CreateMemCpy(temp, &Arg, size, 1); } void HLLegalizeParameter::patchReadOnOutParam(Function &F, Argument &Arg, const DataLayout &DL) { // TODO: Adding lifetime intrinsics isn't easy here, have to analyze uses. Type *Ty = Arg.getType()->getPointerElementType(); AllocaInst *temp = createAllocaForPatch(F, Ty); Arg.replaceAllUsesWith(temp); unsigned size = DL.getTypeAllocSize(Ty); for (auto &BB : F.getBasicBlockList()) { // Copy temp to arg before every return. if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { IRBuilder<> RetBuilder(RI); RetBuilder.CreateMemCpy(&Arg, temp, size, 1); } } } char HLLegalizeParameter::ID = 0; ModulePass *llvm::createHLLegalizeParameter() { return new HLLegalizeParameter(); } INITIALIZE_PASS(HLLegalizeParameter, "hl-legalize-parameter", "Legalize parameter", false, false)
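// A standalone C++ analog (hypothetical types; not HLSL) of the copy-in
// copy-out that ParameterCopyInCopyOut inserts at call sites: the callee only
// ever sees a private temp, so a write to an aliasing global inside the callee
// cannot silently change the argument mid-call.
#include <cstring>
namespace example {
struct T { float a[10]; };
static T g;                                                 // a non-const static global
inline void foo(T &t) { g.a[0] = 1.0f; t.a[0] += g.a[0]; } // callee may touch g
inline void bar() {
  // A direct foo(g) would alias the parameter with g. The legalized form:
  T temp;
  std::memcpy(&temp, &g, sizeof(T)); // copy-in  (bCopyIn)
  foo(temp);                         // callee works on the temp only
  std::memcpy(&g, &temp, sizeof(T)); // copy-out (bCopyOut)
}
} // namespace example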
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilLegalizeEvalOperations.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilLegalizeEvalOperations.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; // Make sure the sources of EvalOperations come from function parameters. // This is needed in order to translate EvaluateAttribute operations that trace // back to LoadInput operations during the translation stage. Promoting load/store // instructions beforehand allows us to easily trace back to LoadInput from the // function call. namespace { class DxilLegalizeEvalOperations : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilLegalizeEvalOperations() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Legalize EvalOperations"; } bool runOnModule(Module &M) override { for (Function &F : M.getFunctionList()) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(&F); if (group == HLOpcodeGroup::HLIntrinsic) { std::vector<CallInst *> EvalFunctionCalls; // Find all EvaluateAttribute calls. for (User *U : F.users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { IntrinsicOp evalOp = static_cast<IntrinsicOp>(hlsl::GetHLOpcode(CI)); if (evalOp == IntrinsicOp::IOP_EvaluateAttributeAtSample || evalOp == IntrinsicOp::IOP_EvaluateAttributeCentroid || evalOp == IntrinsicOp::IOP_EvaluateAttributeSnapped || evalOp == IntrinsicOp::IOP_GetAttributeAtVertex) { EvalFunctionCalls.push_back(CI); } } } if (EvalFunctionCalls.empty()) { continue; } // Starting from the call instruction, find all allocas that this call // uses. 
std::unordered_set<AllocaInst *> allocas; for (CallInst *CI : EvalFunctionCalls) { FindAllocasForEvalOperations(CI, allocas); } SSAUpdater SSA; SmallVector<Instruction *, 4> Insts; for (AllocaInst *AI : allocas) { for (User *user : AI->users()) { if (isa<LoadInst>(user) || isa<StoreInst>(user)) { Insts.emplace_back(cast<Instruction>(user)); } } LoadAndStorePromoter(Insts, SSA).run(Insts); Insts.clear(); } } } return true; } private: void FindAllocasForEvalOperations(Value *val, std::unordered_set<AllocaInst *> &allocas); }; char DxilLegalizeEvalOperations::ID = 0; // Find allocas for EvaluateAttribute operations void DxilLegalizeEvalOperations::FindAllocasForEvalOperations( Value *val, std::unordered_set<AllocaInst *> &allocas) { Value *CurVal = val; while (!isa<AllocaInst>(CurVal)) { if (CallInst *CI = dyn_cast<CallInst>(CurVal)) { CurVal = CI->getOperand(HLOperandIndex::kUnaryOpSrc0Idx); } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(CurVal)) { Value *arg0 = IE->getOperand(0); // Could be another insertelement or undef Value *arg1 = IE->getOperand(1); FindAllocasForEvalOperations(arg0, allocas); CurVal = arg1; } else if (ShuffleVectorInst *SV = dyn_cast<ShuffleVectorInst>(CurVal)) { Value *arg0 = SV->getOperand(0); Value *arg1 = SV->getOperand(1); FindAllocasForEvalOperations( arg0, allocas); // Shuffle vector could come from different allocas CurVal = arg1; } else if (ExtractElementInst *EE = dyn_cast<ExtractElementInst>(CurVal)) { CurVal = EE->getOperand(0); } else if (LoadInst *LI = dyn_cast<LoadInst>(CurVal)) { CurVal = LI->getOperand(0); } else { break; } } if (AllocaInst *AI = dyn_cast<AllocaInst>(CurVal)) { allocas.insert(AI); } } } // namespace ModulePass *llvm::createDxilLegalizeEvalOperationsPass() { return new DxilLegalizeEvalOperations(); } INITIALIZE_PASS(DxilLegalizeEvalOperations, "hlsl-dxil-legalize-eval-operations", "DXIL legalize eval operations", false, false)
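// A toy standalone model (hypothetical Node type, not LLVM IR) of the operand
// walk in FindAllocasForEvalOperations above: insertelement and shufflevector
// fork the search across both sources, loads and extracts simply step to their
// source operand, and reaching an alloca terminates the walk.
#include <set>
#include <vector>
namespace example {
struct Node {
  enum Kind { Alloca, Load, Extract, Insert, Shuffle, Other } kind;
  std::vector<Node *> ops;
};
inline void findAllocas(Node *n, std::set<Node *> &out) {
  while (n && n->kind != Node::Alloca) {
    switch (n->kind) {
    case Node::Insert:  // recurse into the first source, continue on the
    case Node::Shuffle: // second, since either may lead to an alloca
      findAllocas(n->ops[0], out);
      n = n->ops[1];
      break;
    case Node::Load:    // step through the pointer operand
    case Node::Extract: // step through the source vector
      n = n->ops[0];
      break;
    default:
      return; // anything else cannot lead back to an alloca
    }
  }
  if (n)
    out.insert(n);
}
} // namespace example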
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilExpandTrigIntrinsics.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilExpandTrigIntrinsics.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Expand trigonometric intrinsics to a sequence of dxil instructions. // // ========================================================================= // // // We provide expansions to approximate several trigonometric functions that // typically do not have native instructions in hardware. The details of each // expansion are given below, but typically the expansion occurs in three steps // // 1. Perform range reduction (if necessary) to reduce input range // to a value that works with the approximation. // 2. Compute an approximation to the function (typically by evaluating // a polynomial). // 3. Perform range expansion (if necessary) to map the result back to // the original range. // // For example, say we are expanding f(x) using an approximation to f, call it // f*(x). And assume that f* only works for positive inputs, but we know that // f(-x) = -f(x). Then the expansion would be // // 1. a = abs(x) // 2. v = f*(a) // 3. e = x < 0 ? -v : v // // where e contains the final expanded result. // // References // --------------------------------------------------------------------------- // [HMF] Handbook of Mathematical Functions by Abramowitz and Stegun, 1964 // [ADC] Approximations for Digital Computers by Hastings, 1955 // [WIK] Wikipedia, 2017 // // The approximation functions mostly come from [ADC]. The approximations // are also referenced in [HMF], but they give original credit to [ADC]. // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilSignatureElement.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/Support/Global.h" #include "llvm/ADT/MapVector.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include <cmath> #include <utility> using namespace llvm; using namespace hlsl; namespace { class DxilExpandTrigIntrinsics : public FunctionPass { private: public: static char ID; // Pass identification, replacement for typeid explicit DxilExpandTrigIntrinsics() : FunctionPass(ID) {} StringRef getPassName() const override { return "DXIL expand trig intrinsics"; } bool runOnFunction(Function &F) override; private: typedef std::vector<CallInst *> IntrinsicList; IntrinsicList findTrigFunctionsToExpand(Function &F); CallInst *isExpandableTrigIntrinsicCall(Instruction *I); bool expandTrigIntrinsics(DxilModule &DM, const IntrinsicList &worklist); FastMathFlags getFastMathFlagsForIntrinsic(CallInst *intrinsic); void prepareBuilderToExpandIntrinsic(IRBuilder<> &builder, CallInst *intrinsic); // Expansion implementations. 
Value *expandACos(IRBuilder<> &builder, DxilInst_Acos acos, DxilModule &DM); Value *expandASin(IRBuilder<> &builder, DxilInst_Asin asin, DxilModule &DM); Value *expandATan(IRBuilder<> &builder, DxilInst_Atan atan, DxilModule &DM); Value *expandHCos(IRBuilder<> &builder, DxilInst_Hcos hcos, DxilModule &DM); Value *expandHSin(IRBuilder<> &builder, DxilInst_Hsin hsin, DxilModule &DM); Value *expandHTan(IRBuilder<> &builder, DxilInst_Htan htan, DxilModule &DM); Value *expandTan(IRBuilder<> &builder, DxilInst_Tan tan, DxilModule &DM); }; // Math constants. // Values taken from https://msdn.microsoft.com/en-us/library/4hwaceh6.aspx. // Replicated here because they are not part of standard C++. namespace math { constexpr double PI = 3.14159265358979323846; constexpr double PI_2 = 1.57079632679489661923; constexpr double LOG2E = 1.44269504088896340736; } // namespace math } // namespace bool DxilExpandTrigIntrinsics::runOnFunction(Function &F) { DxilModule &DM = F.getParent()->GetOrCreateDxilModule(); IntrinsicList intrinsics = findTrigFunctionsToExpand(F); const bool changed = expandTrigIntrinsics(DM, intrinsics); return changed; } CallInst * DxilExpandTrigIntrinsics::isExpandableTrigIntrinsicCall(Instruction *I) { if (OP::IsDxilOpFuncCallInst(I)) { switch (OP::GetDxilOpFuncCallInst(I)) { case OP::OpCode::Acos: case OP::OpCode::Asin: case OP::OpCode::Atan: case OP::OpCode::Hcos: case OP::OpCode::Hsin: case OP::OpCode::Htan: case OP::OpCode::Tan: return cast<CallInst>(I); default: break; } } return nullptr; } DxilExpandTrigIntrinsics::IntrinsicList DxilExpandTrigIntrinsics::findTrigFunctionsToExpand(Function &F) { IntrinsicList worklist; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) if (CallInst *call = isExpandableTrigIntrinsicCall(&*I)) worklist.push_back(call); return worklist; } static bool isPreciseBuilder(IRBuilder<> &builder) { return !builder.getFastMathFlags().any(); } static void setPreciseBuilder(IRBuilder<> &builder, bool precise) { FastMathFlags flags; if (precise) flags.clear(); else flags.setUnsafeAlgebra(); builder.SetFastMathFlags(flags); } void DxilExpandTrigIntrinsics::prepareBuilderToExpandIntrinsic( IRBuilder<> &builder, CallInst *intrinsic) { DxilModule &DM = intrinsic->getModule()->GetOrCreateDxilModule(); builder.SetInsertPoint(intrinsic); setPreciseBuilder(builder, DM.IsPrecise(intrinsic)); } bool DxilExpandTrigIntrinsics::expandTrigIntrinsics( DxilModule &DM, const IntrinsicList &worklist) { IRBuilder<> builder(DM.GetCtx()); for (CallInst *intrinsic : worklist) { Value *expansion = nullptr; prepareBuilderToExpandIntrinsic(builder, intrinsic); OP::OpCode opcode = OP::GetDxilOpFuncCallInst(intrinsic); switch (opcode) { case OP::OpCode::Acos: expansion = expandACos(builder, intrinsic, DM); break; case OP::OpCode::Asin: expansion = expandASin(builder, intrinsic, DM); break; case OP::OpCode::Atan: expansion = expandATan(builder, intrinsic, DM); break; case OP::OpCode::Hcos: expansion = expandHCos(builder, intrinsic, DM); break; case OP::OpCode::Hsin: expansion = expandHSin(builder, intrinsic, DM); break; case OP::OpCode::Htan: expansion = expandHTan(builder, intrinsic, DM); break; case OP::OpCode::Tan: expansion = expandTan(builder, intrinsic, DM); break; default: assert(false && "unexpected intrinsic"); break; } assert(expansion); intrinsic->replaceAllUsesWith(expansion); intrinsic->eraseFromParent(); } return !worklist.empty(); } // Helper // return dx.op.UnaryFloat(X) // static Value *emitUnaryFloat(IRBuilder<> &builder, Value *X, OP *dxOp, OP::OpCode 
opcode, StringRef name) { Function *F = dxOp->GetOpFunc(opcode, X->getType()); Value *Args[] = {dxOp->GetI32Const(static_cast<int>(opcode)), X}; CallInst *Call = builder.CreateCall(F, Args, name); if (isPreciseBuilder(builder)) DxilMDHelper::MarkPrecise(Call); return Call; } // Helper // return dx.op.Fabs(X) // static Value *emitFAbs(IRBuilder<> &builder, Value *X, OP *dxOp, StringRef name) { return emitUnaryFloat(builder, X, dxOp, OP::OpCode::FAbs, name); } // Helper // return dx.op.Sqrt(X) // static Value *emitSqrt(IRBuilder<> &builder, Value *X, OP *dxOp, StringRef name) { return emitUnaryFloat(builder, X, dxOp, OP::OpCode::Sqrt, name); } // Helper // return sqrt(1 - X) * psi*(X) // // We compute the polynomial using Horner's method to evaluate it efficiently. // // psi*(X) = a0 + a1x + a2x^2 + a3x^3 // = a0 + x(a1 + a2x + a3x^2) // = a0 + x(a1 + x(a2 + a3x)) // static Value *emitSqrt1mXtimesPsiX(IRBuilder<> &builder, Value *X, OP *dxOp, StringRef name) { Value *One = ConstantFP::get(X->getType(), 1.0); Value *a0 = ConstantFP::get(X->getType(), 1.5707288); Value *a1 = ConstantFP::get(X->getType(), -0.2121144); Value *a2 = ConstantFP::get(X->getType(), 0.0742610); Value *a3 = ConstantFP::get(X->getType(), -0.0187293); // sqrt(1-x) Value *r1 = builder.CreateFSub(One, X, name); Value *r2 = emitSqrt(builder, r1, dxOp, name); // psi*(x) Value *r3 = builder.CreateFMul(X, a3, name); r3 = builder.CreateFAdd(r3, a2, name); r3 = builder.CreateFMul(X, r3, name); r3 = builder.CreateFAdd(r3, a1, name); r3 = builder.CreateFMul(X, r3, name); r3 = builder.CreateFAdd(r3, a0, name); // sqrt(1-x) * psi*(x) Value *r4 = builder.CreateFMul(r2, r3, name); return r4; } // Helper // return e^x, e^-x // // We can use the dxil Exp function to compute the exponential. The only slight // wrinkle is that in dxil Exp(x) = 2^x and we need e^x. Luckily we can easily // change the base of the exponent using the following identity [HMF(p69)] // // e^x = 2^{x * log_2(e)} // static std::pair<Value *, Value *> emitExEmx(IRBuilder<> &builder, Value *X, OP *dxOp, StringRef name) { Value *Zero = ConstantFP::get(X->getType(), 0.0); Value *Log2e = ConstantFP::get(X->getType(), math::LOG2E); Value *r0 = builder.CreateFMul(X, Log2e, name); Value *r1 = emitUnaryFloat(builder, r0, dxOp, OP::OpCode::Exp, name); Value *r2 = builder.CreateFSub(Zero, r0, name); Value *r3 = emitUnaryFloat(builder, r2, dxOp, OP::OpCode::Exp, name); return std::make_pair(r1, r3); } // Asin // ---------------------------------------------------------------------------- // Function // arcsin X = pi/2 - sqrt(1 - X) * psi(X) // // Range // 0 <= X <= 1 // // Approximation // Psi*(X) = a0 + a1x + a2x^2 + a3x^3 // a0 = 1.5707288 // a1 = -0.2121144 // a2 = 0.0742610 // a3 = -0.0187293 // // The domain of the approximation is 0 <= x <= 1, but the domain of asin is // -1 <= x <= 1. So we need to perform a range reduction to [0,1] before // computing the approximation. // // We use the following identity from [HMF(p80),WIK] for range reduction // // asin(-x) = -asin(x) // // We take the absolute value of x, compute asin(x) using the approximation // and then negate the value if x < 0. // // In [HMF] the authors claim an error, e, of |e| <= 5e-5, but the error graph // in [ADC] looks like the error can be larger than that for some inputs. 
// Value *DxilExpandTrigIntrinsics::expandASin(IRBuilder<> &builder, DxilInst_Asin asin, DxilModule &DM) { assert(asin); StringRef name = "asin.x"; Value *X = asin.get_value(); Value *PI_2 = ConstantFP::get(X->getType(), math::PI_2); Value *Zero = ConstantFP::get(X->getType(), 0.0); // Range reduction to [0, 1] Value *absX = emitFAbs(builder, X, DM.GetOP(), name); // Approximation Value *psiX = emitSqrt1mXtimesPsiX(builder, absX, DM.GetOP(), name); Value *asinX = builder.CreateFSub(PI_2, psiX, name); Value *asinmX = builder.CreateFSub(Zero, asinX, name); // Range expansion to [-1, 1] Value *lt0 = builder.CreateFCmp(CmpInst::FCMP_ULT, X, Zero, name); Value *r = builder.CreateSelect(lt0, asinmX, asinX, name); return r; } // Acos // ---------------------------------------------------------------------------- // The acos expansion uses the following identity [WIK], so that we can use the // same approximation psi*(x) that we use for asin. // // acos(x) = pi/2 - asin(x) // // Substituting the equation for asin(x) we get // // acos(x) = pi/2 - asin(x) // = pi/2 - (pi/2 - sqrt(1-x)*psi(x)) // = sqrt(1-x)*psi(x) // // We use the following identity from [HMF(p80),WIK] for range reduction // // acos(-x) = pi - acos(x) // = pi - sqrt(1-x)*psi(x) // // We take the absolute value of x, compute acos(x) using the approximation // and then subtract from pi if x < 0. // Value *DxilExpandTrigIntrinsics::expandACos(IRBuilder<> &builder, DxilInst_Acos acos, DxilModule &DM) { assert(acos); StringRef name = "acos.x"; Value *X = acos.get_value(); Value *PI = ConstantFP::get(X->getType(), math::PI); Value *Zero = ConstantFP::get(X->getType(), 0.0); // Range reduction to [0, 1] Value *absX = emitFAbs(builder, X, DM.GetOP(), name); // Approximation Value *acosX = emitSqrt1mXtimesPsiX(builder, absX, DM.GetOP(), name); Value *acosmX = builder.CreateFSub(PI, acosX, name); // Range expansion to [-1, 1] Value *lt0 = builder.CreateFCmp(CmpInst::FCMP_ULT, X, Zero, name); Value *r = builder.CreateSelect(lt0, acosmX, acosX, name); return r; } // Atan // ---------------------------------------------------------------------------- // Function // arctan X // // Range // -1 <= X <= 1 // // Approximation // arctan*(x) = c1x + c3x^3 + c5x^5 + c7x^7 + c9x^9 // c1 = 0.9998660 // c3 = -0.3302995 // c5 = 0.1801410 // c7 = -0.0851330 // c9 = 0.0208351 // // The polynomial is evaluated using Horner's method to efficiently compute the // value // // c1x + c3x^3 + c5x^5 + c7x^7 + c9x^9 // = x(c1 + c3x^2 + c5x^4 + c7x^6 + c9x^8) // = x(c1 + x^2(c3 + c5x^2 + c7x^4 + c9x^6)) // = x(c1 + x^2(c3 + x^2(c5 + c7x^2 + c9x^4))) // = x(c1 + x^2(c3 + x^2(c5 + x^2(c7 + c9x^2)))) // // The range reduction is a little more complicated for atan because the // domain of atan is [-inf, inf], but the domain of the approximation is only // [-1, 1]. We use the following identities for range reduction from // [HMF(p80),WIK] // // arctan(-x) = -arctan(x) // arctan(x) = pi/2 - arctan(1/x) if x > 0 // // The first identity allows us to only work with positive numbers. The second // identity allows us to reduce the range to [0,1]. We first convert the value // to positive by taking abs(x). Then if x > 1 we compute arctan(1/x). // // To expand the range we check if x > 1 and then subtract the computed value from // pi/2, and if x is negative we negate the final value. 
// Value *DxilExpandTrigIntrinsics::expandATan(IRBuilder<> &builder, DxilInst_Atan atan, DxilModule &DM) { assert(atan); StringRef name = "atan.x"; Value *X = atan.get_value(); Value *PI_2 = ConstantFP::get(X->getType(), math::PI_2); Value *One = ConstantFP::get(X->getType(), 1.0); Value *Zero = ConstantFP::get(X->getType(), 0.0); Value *c1 = ConstantFP::get(X->getType(), 0.9998660); Value *c3 = ConstantFP::get(X->getType(), -0.3302995); Value *c5 = ConstantFP::get(X->getType(), 0.1801410); Value *c7 = ConstantFP::get(X->getType(), -0.0851330); Value *c9 = ConstantFP::get(X->getType(), 0.0208351); // Range reduction to [0, inf] Value *absX = emitFAbs(builder, X, DM.GetOP(), name); // Range reduction to [0, 1] Value *gt1 = builder.CreateFCmp(CmpInst::FCMP_UGT, absX, One, name); Value *r1 = builder.CreateFDiv(One, absX, name); Value *r2 = builder.CreateSelect(gt1, r1, absX, name); // Approximate Value *r3 = builder.CreateFMul(r2, r2, name); Value *r4 = builder.CreateFMul(r3, c9, name); r4 = builder.CreateFAdd(r4, c7, name); r4 = builder.CreateFMul(r4, r3, name); r4 = builder.CreateFAdd(r4, c5, name); r4 = builder.CreateFMul(r4, r3, name); r4 = builder.CreateFAdd(r4, c3, name); r4 = builder.CreateFMul(r4, r3, name); r4 = builder.CreateFAdd(r4, c1, name); r4 = builder.CreateFMul(r2, r4, name); // Range Expansion to [0, inf] Value *r5 = builder.CreateFSub(PI_2, r4, name); Value *r6 = builder.CreateSelect(gt1, r5, r4, name); // Range Expansion to [-inf, inf] Value *r7 = builder.CreateFSub(Zero, r6, name); Value *lt0 = builder.CreateFCmp(CmpInst::FCMP_ULT, X, Zero, name); Value *r = builder.CreateSelect(lt0, r7, r6, name); return r; } // Hcos // ---------------------------------------------------------------------------- // We use the following identity for computing hcos(x) from [HMF(p83)] // // cosh(x) = (e^x + e^-x) / 2 // // No range reduction is needed. // Value *DxilExpandTrigIntrinsics::expandHCos(IRBuilder<> &builder, DxilInst_Hcos hcos, DxilModule &DM) { assert(hcos); StringRef name = "hcos.x"; Value *eX, *emX; Value *X = hcos.get_value(); Value *Two = ConstantFP::get(X->getType(), 2.0); std::tie(eX, emX) = emitExEmx(builder, X, DM.GetOP(), name); Value *r4 = builder.CreateFAdd(eX, emX, name); Value *r = builder.CreateFDiv(r4, Two, name); return r; } // Hsin // ---------------------------------------------------------------------------- // We use the following identity for computing hsin(x) from [HMF(p83)] // // sinh(x) = (e^x - e^-x) / 2 // // No range reduction is needed. // Value *DxilExpandTrigIntrinsics::expandHSin(IRBuilder<> &builder, DxilInst_Hsin hsin, DxilModule &DM) { assert(hsin); StringRef name = "hsin.x"; Value *eX, *emX; Value *X = hsin.get_value(); Value *Two = ConstantFP::get(X->getType(), 2.0); std::tie(eX, emX) = emitExEmx(builder, X, DM.GetOP(), name); Value *r4 = builder.CreateFSub(eX, emX, name); Value *r = builder.CreateFDiv(r4, Two, name); return r; } // Htan // ---------------------------------------------------------------------------- // We use the following identity for computing htan(x) from [HMF(p83)] // // tanh(x) = (e^x - e^-x) / (e^x + e^-x) // // No range reduction is needed. 
// Value *DxilExpandTrigIntrinsics::expandHTan(IRBuilder<> &builder, DxilInst_Htan htan, DxilModule &DM) { assert(htan); StringRef name = "htan.x"; Value *eX, *emX; Value *X = htan.get_value(); std::tie(eX, emX) = emitExEmx(builder, X, DM.GetOP(), name); Value *r4 = builder.CreateFSub(eX, emX, name); Value *r5 = builder.CreateFAdd(eX, emX, name); Value *r = builder.CreateFDiv(r4, r5, name); return r; } // Tan // ---------------------------------------------------------------------------- // We use the following identity for computing tan(x) // // tan(x) = sin(x) / cos(x) // // No range reduction is needed. // Value *DxilExpandTrigIntrinsics::expandTan(IRBuilder<> &builder, DxilInst_Tan tan, DxilModule &DM) { assert(tan); StringRef name = "tan.x"; Value *X = tan.get_value(); OP *dxOp = DM.GetOP(); Value *sin = emitUnaryFloat(builder, X, dxOp, OP::OpCode::Sin, name); Value *cos = emitUnaryFloat(builder, X, dxOp, OP::OpCode::Cos, name); Value *r = builder.CreateFDiv(sin, cos, name); return r; } char DxilExpandTrigIntrinsics::ID = 0; FunctionPass *llvm::createDxilExpandTrigIntrinsicsPass() { return new DxilExpandTrigIntrinsics(); } INITIALIZE_PASS(DxilExpandTrigIntrinsics, "hlsl-dxil-expand-trig-intrinsics", "DXIL expand trig intrinsics", false, false)
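// A standalone numeric sanity check (not part of the pass) of the [ADC] asin
// expansion implemented above: asin(x) ~= pi/2 - sqrt(1 - |x|) * psi*(|x|),
// negated for x < 0. Running it prints the maximum absolute error over
// [-1, 1], which should be on the order of the ~5e-5 cited in [HMF].
#include <algorithm>
#include <cmath>
#include <cstdio>
int main() {
  const double a0 = 1.5707288, a1 = -0.2121144, a2 = 0.0742610,
               a3 = -0.0187293;
  const double pi_2 = 1.57079632679489661923;
  double maxErr = 0.0;
  for (double x = -1.0; x <= 1.0; x += 1.0 / 4096.0) {
    double a = std::fabs(x);
    double psi = a0 + a * (a1 + a * (a2 + a * a3)); // Horner form of psi*(a)
    double approx = pi_2 - std::sqrt(1.0 - a) * psi;
    if (x < 0.0)
      approx = -approx; // range expansion: asin(-x) = -asin(x)
    maxErr = std::max(maxErr, std::fabs(approx - std::asin(x)));
  }
  std::printf("max |asin error| = %g\n", maxErr);
  return 0;
}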
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/CMakeLists.txt
# Copyright (C) Microsoft Corporation. All rights reserved. # This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. add_hlsl_hctgen(DxcOptimizer OUTPUT DxcOptimizer.inc BUILD_DIR) add_llvm_library(LLVMHLSL ComputeViewIdState.cpp ComputeViewIdStateBuilder.cpp ControlDependence.cpp DxilCondenseResources.cpp DxilContainerReflection.cpp DxilConvergent.cpp DxilEliminateOutputDynamicIndexing.cpp DxilExpandTrigIntrinsics.cpp DxilGenerationPass.cpp DxilLegalizeEvalOperations.cpp DxilLegalizeSampleOffsetPass.cpp DxilLinker.cpp DxilLoopDeletion.cpp DxilPrecisePropagatePass.cpp DxilPreparePasses.cpp DxilPromoteResourcePasses.cpp DxilPackSignatureElement.cpp DxilPatchShaderRecordBindings.cpp DxilNoops.cpp DxilPreserveAllOutputs.cpp DxilRenameResourcesPass.cpp DxilSimpleGVNHoist.cpp DxilSignatureValidation.cpp DxilTargetLowering.cpp DxilTargetTransformInfo.cpp DxilTranslateRawBuffer.cpp DxilExportMap.cpp DxcOptimizer.cpp HLDeadFunctionElimination.cpp HLExpandStoreIntrinsics.cpp HLLegalizeParameter.cpp HLLowerUDT.cpp HLMatrixBitcastLowerPass.cpp HLMatrixLowerPass.cpp HLMatrixSubscriptUseReplacer.cpp HLMatrixType.cpp HLMetadataPasses.cpp HLModule.cpp HLOperations.cpp HLOperationLower.cpp HLOperationLowerExtension.cpp HLPreprocess.cpp HLResource.cpp HLSignatureLower.cpp HLUtil.cpp PauseResumePasses.cpp WaveSensitivityAnalysis.cpp DxilNoOptLegalize.cpp DxilPoisonValues.cpp DxilDeleteRedundantDebugValues.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/IR ) add_dependencies(LLVMHLSL intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLExpandStoreIntrinsics.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLExpandStoreIntrinsics.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Transforms/Scalar.h" using namespace hlsl; using namespace llvm; namespace { // Expands buffer stores of aggregate value types // into stores of their individual elements, // before SROA happens and we lose the layout information. class HLExpandStoreIntrinsics : public FunctionPass { public: static char ID; explicit HLExpandStoreIntrinsics() : FunctionPass(ID) {} StringRef getPassName() const override { return "Expand HLSL store intrinsics"; } bool runOnFunction(Function &Func) override; private: DxilTypeSystem *m_typeSys; bool expand(CallInst *StoreCall); void emitElementStores(CallInst &OriginalCall, SmallVectorImpl<Value *> &GEPIndicesStack, Type *StackTopTy, unsigned OffsetFromBase, DxilFieldAnnotation *fieldAnnotation); }; char HLExpandStoreIntrinsics::ID = 0; bool HLExpandStoreIntrinsics::runOnFunction(Function &Func) { bool changed = false; m_typeSys = &(Func.getParent()->GetHLModule().GetTypeSystem()); for (auto InstIt = inst_begin(Func), InstEnd = inst_end(Func); InstIt != InstEnd;) { CallInst *Call = dyn_cast<CallInst>(&*(InstIt++)); if (Call == nullptr || GetHLOpcodeGroup(Call->getCalledFunction()) != HLOpcodeGroup::HLIntrinsic || static_cast<IntrinsicOp>(GetHLOpcode(Call)) != IntrinsicOp::MOP_Store) { continue; } changed |= expand(Call); } return changed; } bool HLExpandStoreIntrinsics::expand(CallInst *StoreCall) { Value *OldStoreValueArg = StoreCall->getArgOperand(HLOperandIndex::kStoreValOpIdx); Type *OldStoreValueArgTy = OldStoreValueArg->getType(); // Only expand if the value argument is by pointer, which means it's an // aggregate. 
if (!OldStoreValueArgTy->isPointerTy()) return false; IRBuilder<> Builder(StoreCall); SmallVector<Value *, 4> GEPIndicesStack; GEPIndicesStack.emplace_back(Builder.getInt32(0)); emitElementStores(*StoreCall, GEPIndicesStack, OldStoreValueArgTy->getPointerElementType(), /* OffsetFromBase */ 0, nullptr); DXASSERT(StoreCall->getType()->isVoidTy() && StoreCall->use_empty(), "Buffer store intrinsic is expected to return void and hence not " "have uses."); StoreCall->eraseFromParent(); return true; } void HLExpandStoreIntrinsics::emitElementStores( CallInst &OriginalCall, SmallVectorImpl<Value *> &GEPIndicesStack, Type *StackTopTy, unsigned OffsetFromBase, DxilFieldAnnotation *fieldAnnotation) { llvm::Module &Module = *OriginalCall.getModule(); IRBuilder<> Builder(&OriginalCall); StructType *StructTy = dyn_cast<StructType>(StackTopTy); if (StructTy != nullptr && !HLMatrixType::isa(StructTy)) { const StructLayout *Layout = Module.getDataLayout().getStructLayout(StructTy); DxilStructAnnotation *SA = m_typeSys->GetStructAnnotation(StructTy); for (unsigned i = 0; i < StructTy->getNumElements(); ++i) { Type *ElemTy = StructTy->getElementType(i); unsigned ElemOffsetFromBase = OffsetFromBase + Layout->getElementOffset(i); GEPIndicesStack.emplace_back(Builder.getInt32(i)); DxilFieldAnnotation *FA = SA != nullptr ? &(SA->GetFieldAnnotation(i)) : nullptr; emitElementStores(OriginalCall, GEPIndicesStack, ElemTy, ElemOffsetFromBase, FA); GEPIndicesStack.pop_back(); } } else if (ArrayType *ArrayTy = dyn_cast<ArrayType>(StackTopTy)) { unsigned ElemSize = (unsigned)Module.getDataLayout().getTypeAllocSize( ArrayTy->getElementType()); for (int i = 0; i < (int)ArrayTy->getNumElements(); ++i) { unsigned ElemOffsetFromBase = OffsetFromBase + ElemSize * i; GEPIndicesStack.emplace_back(Builder.getInt32(i)); emitElementStores(OriginalCall, GEPIndicesStack, ArrayTy->getElementType(), ElemOffsetFromBase, fieldAnnotation); GEPIndicesStack.pop_back(); } } else { // Scalar or vector Value *OpcodeVal = OriginalCall.getArgOperand(HLOperandIndex::kOpcodeIdx); Value *BufHandle = OriginalCall.getArgOperand(HLOperandIndex::kHandleOpIdx); Value *OffsetVal = OriginalCall.getArgOperand(HLOperandIndex::kStoreOffsetOpIdx); if (OffsetFromBase > 0) OffsetVal = Builder.CreateAdd(OffsetVal, Builder.getInt32(OffsetFromBase)); Value *AggPtr = OriginalCall.getArgOperand(HLOperandIndex::kStoreValOpIdx); Value *ElemPtr = Builder.CreateGEP(AggPtr, GEPIndicesStack); Value *ElemVal = nullptr; if (HLMatrixType::isa(StackTopTy) && fieldAnnotation && fieldAnnotation->HasMatrixAnnotation()) { // For matrix load, we generate HL intrinsic // matldst.colLoad/matldst.rowLoad instead of LLVM LoadInst to ensure that // it gets lowered properly later in HLMatrixLowerPass bool isRowMajor = fieldAnnotation->GetMatrixAnnotation().Orientation == hlsl::MatrixOrientation::RowMajor; unsigned matLdOpcode = isRowMajor ? 
static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatLoad) : static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatLoad); // Generate matrix load FunctionType *MatLdFnType = FunctionType::get( StackTopTy, {Builder.getInt32Ty(), ElemPtr->getType()}, /* isVarArg */ false); Function *MatLdFn = GetOrCreateHLFunction( Module, MatLdFnType, HLOpcodeGroup::HLMatLoadStore, matLdOpcode); Value *MatLdOpCode = ConstantInt::get(Builder.getInt32Ty(), matLdOpcode); ElemVal = Builder.CreateCall(MatLdFn, {MatLdOpCode, ElemPtr}); } else { ElemVal = Builder.CreateLoad(ElemPtr); // We go from memory to memory so // no special bool handling needed } FunctionType *NewCalleeType = FunctionType::get(Builder.getVoidTy(), {OpcodeVal->getType(), BufHandle->getType(), OffsetVal->getType(), ElemVal->getType()}, /* isVarArg */ false); Function *NewCallee = GetOrCreateHLFunction(Module, NewCalleeType, HLOpcodeGroup::HLIntrinsic, (unsigned)IntrinsicOp::MOP_Store); Builder.CreateCall(NewCallee, {OpcodeVal, BufHandle, OffsetVal, ElemVal}); } } } // namespace FunctionPass *llvm::createHLExpandStoreIntrinsicsPass() { return new HLExpandStoreIntrinsics(); } INITIALIZE_PASS(HLExpandStoreIntrinsics, "hl-expand-store-intrinsics", "Expand HLSL store intrinsics", false, false)
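// Usage sketch (assumed, following standard LLVM legacy pass-manager
// conventions; not part of the original file):
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createHLExpandStoreIntrinsicsPass());
//   PM.run(M); // expands aggregate MOP_Store calls before SROA runs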
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLSignatureLower.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLSignatureLower.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Lower signatures of entry function to DXIL LoadInput/StoreOutput. // // // /////////////////////////////////////////////////////////////////////////////// #include "HLSignatureLower.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilSemantic.h" #include "dxc/DXIL/DxilSigPoint.h" #include "dxc/DXIL/DxilSignatureElement.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilPackSignatureElement.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; using namespace hlsl; namespace { // Decompose semantic name (eg FOO1=>FOO,1), change interp mode for SV_Position. // Return semantic index. unsigned UpdateSemanticAndInterpMode(StringRef &semName, DXIL::InterpolationMode &mode, DXIL::SigPointKind kind, LLVMContext &Context) { llvm::StringRef baseSemName; // The 'FOO' in 'FOO1'. uint32_t semIndex; // The '1' in 'FOO1' // Split semName and index. Semantic::DecomposeNameAndIndex(semName, &baseSemName, &semIndex); semName = baseSemName; const Semantic *semantic = Semantic::GetByName(semName, kind); if (semantic && semantic->GetKind() == Semantic::Kind::Position) { // Update interp mode to no_perspective version for SV_Position. switch (mode) { case InterpolationMode::Kind::LinearCentroid: mode = InterpolationMode::Kind::LinearNoperspectiveCentroid; break; case InterpolationMode::Kind::LinearSample: mode = InterpolationMode::Kind::LinearNoperspectiveSample; break; case InterpolationMode::Kind::Linear: mode = InterpolationMode::Kind::LinearNoperspective; break; case InterpolationMode::Kind::Constant: case InterpolationMode::Kind::Undefined: case InterpolationMode::Kind::Invalid: { llvm_unreachable("invalid interpolation mode for SV_Position"); } break; case InterpolationMode::Kind::LinearNoperspective: case InterpolationMode::Kind::LinearNoperspectiveCentroid: case InterpolationMode::Kind::LinearNoperspectiveSample: // Already Noperspective modes. break; } } return semIndex; } DxilSignatureElement *FindArgInSignature(Argument &arg, llvm::StringRef semantic, DXIL::InterpolationMode interpMode, DXIL::SigPointKind kind, DxilSignature &sig) { // Match output ID. unsigned semIndex = UpdateSemanticAndInterpMode(semantic, interpMode, kind, arg.getContext()); for (uint32_t i = 0; i < sig.GetElements().size(); i++) { DxilSignatureElement &SE = sig.GetElement(i); bool semNameMatch = semantic.equals_lower(SE.GetName()); bool semIndexMatch = semIndex == SE.GetSemanticIndexVec()[0]; if (semNameMatch && semIndexMatch) { // Find a match. 
return &SE; } } return nullptr; } } // namespace namespace { void replaceInputOutputWithIntrinsic(DXIL::SemanticKind semKind, Value *GV, OP *hlslOP, IRBuilder<> &Builder) { Type *Ty = GV->getType(); if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); OP::OpCode opcode; switch (semKind) { case Semantic::Kind::DomainLocation: opcode = OP::OpCode::DomainLocation; break; case Semantic::Kind::OutputControlPointID: opcode = OP::OpCode::OutputControlPointID; break; case Semantic::Kind::GSInstanceID: opcode = OP::OpCode::GSInstanceID; break; case Semantic::Kind::PrimitiveID: opcode = OP::OpCode::PrimitiveID; break; case Semantic::Kind::SampleIndex: opcode = OP::OpCode::SampleIndex; break; case Semantic::Kind::Coverage: opcode = OP::OpCode::Coverage; break; case Semantic::Kind::InnerCoverage: opcode = OP::OpCode::InnerCoverage; break; case Semantic::Kind::ViewID: opcode = OP::OpCode::ViewID; break; case Semantic::Kind::GroupThreadID: opcode = OP::OpCode::ThreadIdInGroup; break; case Semantic::Kind::GroupID: opcode = OP::OpCode::GroupId; break; case Semantic::Kind::DispatchThreadID: opcode = OP::OpCode::ThreadId; break; case Semantic::Kind::GroupIndex: opcode = OP::OpCode::FlattenedThreadIdInGroup; break; case Semantic::Kind::CullPrimitive: { GV->replaceAllUsesWith(ConstantInt::get(Ty, (uint64_t)0)); return; } break; case Semantic::Kind::StartVertexLocation: opcode = OP::OpCode::StartVertexLocation; break; case Semantic::Kind::StartInstanceLocation: opcode = OP::OpCode::StartInstanceLocation; break; default: DXASSERT(0, "invalid semantic"); return; } Function *dxilFunc = hlslOP->GetOpFunc(opcode, Ty->getScalarType()); Constant *OpArg = hlslOP->GetU32Const((unsigned)opcode); Value *newArg = nullptr; if (semKind == Semantic::Kind::DomainLocation || semKind == Semantic::Kind::GroupThreadID || semKind == Semantic::Kind::GroupID || semKind == Semantic::Kind::DispatchThreadID) { unsigned vecSize = 1; if (FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty)) vecSize = VT->getNumElements(); newArg = Builder.CreateCall( dxilFunc, {OpArg, semKind == Semantic::Kind::DomainLocation ? hlslOP->GetU8Const(0) : hlslOP->GetU32Const(0)}); if (vecSize > 1) { Value *result = UndefValue::get(Ty); result = Builder.CreateInsertElement(result, newArg, (uint64_t)0); for (unsigned i = 1; i < vecSize; i++) { Value *newElt = Builder.CreateCall( dxilFunc, {OpArg, semKind == Semantic::Kind::DomainLocation ? hlslOP->GetU8Const(i) : hlslOP->GetU32Const(i)}); result = Builder.CreateInsertElement(result, newElt, i); } newArg = result; } } else { newArg = Builder.CreateCall(dxilFunc, {OpArg}); } if (newArg->getType() != GV->getType()) { DXASSERT_NOMSG(GV->getType()->isPointerTy()); for (User *U : GV->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { LI->replaceAllUsesWith(newArg); } } } else { GV->replaceAllUsesWith(newArg); } } } // namespace void HLSignatureLower::ProcessArgument(Function *func, DxilFunctionAnnotation *funcAnnotation, Argument &arg, DxilFunctionProps &props, const ShaderModel *pSM, bool isPatchConstantFunction, bool forceOut, bool &hasClipPlane) { Type *Ty = arg.getType(); DxilParameterAnnotation &paramAnnotation = funcAnnotation->GetParameterAnnotation(arg.getArgNo()); hlsl::DxilParamInputQual qual = forceOut ? 
DxilParamInputQual::Out : paramAnnotation.GetParamInputQual(); bool isInout = qual == DxilParamInputQual::Inout; // If this was an inout param, do the output side first if (isInout) { DXASSERT(!isPatchConstantFunction, "Patch Constant function should not have inout param"); m_inoutArgSet.insert(&arg); const bool bForceOutTrue = true; ProcessArgument(func, funcAnnotation, arg, props, pSM, isPatchConstantFunction, bForceOutTrue, hasClipPlane); qual = DxilParamInputQual::In; } // Get stream index unsigned streamIdx = 0; switch (qual) { case DxilParamInputQual::OutStream1: streamIdx = 1; break; case DxilParamInputQual::OutStream2: streamIdx = 2; break; case DxilParamInputQual::OutStream3: streamIdx = 3; break; default: // Use streamIdx = 0 by default. break; } const SigPoint *sigPoint = SigPoint::GetSigPoint( SigPointFromInputQual(qual, props.shaderKind, isPatchConstantFunction)); unsigned rows, cols; HLModule::GetParameterRowsAndCols(Ty, rows, cols, paramAnnotation); CompType EltTy = paramAnnotation.GetCompType(); DXIL::InterpolationMode interpMode = paramAnnotation.GetInterpolationMode().GetKind(); // Set undefined interpMode. if (sigPoint->GetKind() == DXIL::SigPointKind::MSPOut) { if (interpMode != InterpolationMode::Kind::Undefined && interpMode != InterpolationMode::Kind::Constant) { dxilutil::EmitErrorOnFunction( HLM.GetModule()->getContext(), func, "Mesh shader's primitive outputs' interpolation mode must be " "constant or undefined."); } interpMode = InterpolationMode::Kind::Constant; } else if (!sigPoint->NeedsInterpMode()) interpMode = InterpolationMode::Kind::Undefined; else if (interpMode == InterpolationMode::Kind::Undefined) { // Type-based default: linear for floats, constant for others. if (EltTy.IsFloatTy()) interpMode = InterpolationMode::Kind::Linear; else interpMode = InterpolationMode::Kind::Constant; } // back-compat mode - remap obsolete semantics if (HLM.GetHLOptions().bDX9CompatMode && paramAnnotation.HasSemanticString()) { hlsl::RemapObsoleteSemantic(paramAnnotation, sigPoint->GetKind(), HLM.GetCtx()); } llvm::StringRef semanticStr = paramAnnotation.GetSemanticString(); if (semanticStr.empty()) { std::string msg = "Semantic must be defined for all "; msg += (qual == DxilParamInputQual::Out) ? "outputs " : "parameters "; msg += "of an entry function or patch constant function"; dxilutil::EmitErrorOnFunction(HLM.GetModule()->getContext(), func, msg); return; } UpdateSemanticAndInterpMode(semanticStr, interpMode, sigPoint->GetKind(), arg.getContext()); // Get Semantic interpretation, skipping if not in signature const Semantic *pSemantic = Semantic::GetByName(semanticStr); DXIL::SemanticInterpretationKind interpretation = SigPoint::GetInterpretation(pSemantic->GetKind(), sigPoint->GetKind(), pSM->GetMajor(), pSM->GetMinor()); // Verify system value semantics do not overlap. // Note: Arbitrary are always in the signature and will be verified with a // different mechanism. For patch constant function, only validate patch // constant elements (others already validated on hull function) if (pSemantic->GetKind() != DXIL::SemanticKind::Arbitrary && (!isPatchConstantFunction || (!sigPoint->IsInput() && !sigPoint->IsOutput()))) { auto &SemanticUseMap = sigPoint->IsInput() ? m_InputSemanticsUsed : (sigPoint->IsOutput() ? m_OutputSemanticsUsed[streamIdx] : (sigPoint->IsPatchConstOrPrim() ? 
m_PatchConstantSemanticsUsed : m_OtherSemanticsUsed)); if (SemanticUseMap.count((unsigned)pSemantic->GetKind()) > 0) { auto &SemanticIndexSet = SemanticUseMap[(unsigned)pSemantic->GetKind()]; for (unsigned idx : paramAnnotation.GetSemanticIndexVec()) { if (SemanticIndexSet.count(idx) > 0) { dxilutil::EmitErrorOnFunction( HLM.GetModule()->getContext(), func, "Parameter with semantic " + semanticStr + " has overlapping semantic index at " + std::to_string(idx) + "."); return; } } } auto &SemanticIndexSet = SemanticUseMap[(unsigned)pSemantic->GetKind()]; for (unsigned idx : paramAnnotation.GetSemanticIndexVec()) { SemanticIndexSet.emplace(idx); } // Enforce Coverage and InnerCoverage input mutual exclusivity if (sigPoint->IsInput()) { if ((pSemantic->GetKind() == DXIL::SemanticKind::Coverage && SemanticUseMap.count((unsigned)DXIL::SemanticKind::InnerCoverage) > 0) || (pSemantic->GetKind() == DXIL::SemanticKind::InnerCoverage && SemanticUseMap.count((unsigned)DXIL::SemanticKind::Coverage) > 0)) { dxilutil::EmitErrorOnFunction( HLM.GetModule()->getContext(), func, "Pixel shader inputs SV_Coverage and SV_InnerCoverage are mutually " "exclusive."); return; } } } // Validate interpretation and replace argument usage with load/store // intrinsics { switch (interpretation) { case DXIL::SemanticInterpretationKind::NA: { dxilutil::EmitErrorOnFunction( HLM.GetModule()->getContext(), func, Twine("Semantic ") + semanticStr + Twine(" is invalid for shader model: ") + ShaderModel::GetKindName(props.shaderKind)); return; } case DXIL::SemanticInterpretationKind::NotInSig: case DXIL::SemanticInterpretationKind::Shadow: { IRBuilder<> funcBuilder(func->getEntryBlock().getFirstInsertionPt()); if (DbgDeclareInst *DDI = llvm::FindAllocaDbgDeclare(&arg)) { funcBuilder.SetCurrentDebugLocation(DDI->getDebugLoc()); } replaceInputOutputWithIntrinsic(pSemantic->GetKind(), &arg, HLM.GetOP(), funcBuilder); if (interpretation == DXIL::SemanticInterpretationKind::NotInSig) return; // This argument should not be included in the signature break; } case DXIL::SemanticInterpretationKind::SV: case DXIL::SemanticInterpretationKind::SGV: case DXIL::SemanticInterpretationKind::Arb: case DXIL::SemanticInterpretationKind::Target: case DXIL::SemanticInterpretationKind::TessFactor: case DXIL::SemanticInterpretationKind::NotPacked: case DXIL::SemanticInterpretationKind::ClipCull: // Will be replaced with load/store intrinsics in // GenerateDxilInputsOutputs break; default: DXASSERT(false, "Unexpected SemanticInterpretationKind"); return; } } // Determine signature this argument belongs in, if any DxilSignature *pSig = nullptr; DXIL::SignatureKind sigKind = sigPoint->GetSignatureKindWithFallback(); switch (sigKind) { case DXIL::SignatureKind::Input: pSig = &EntrySig.InputSignature; break; case DXIL::SignatureKind::Output: pSig = &EntrySig.OutputSignature; break; case DXIL::SignatureKind::PatchConstOrPrim: pSig = &EntrySig.PatchConstOrPrimSignature; break; default: DXASSERT(false, "Expected real signature kind at this point"); return; // No corresponding signature } // Create and add element to signature DxilSignatureElement *pSE = nullptr; { // Add signature element to appropriate maps if (isPatchConstantFunction && sigKind != DXIL::SignatureKind::PatchConstOrPrim) { pSE = FindArgInSignature(arg, paramAnnotation.GetSemanticString(), interpMode, sigPoint->GetKind(), *pSig); if (!pSE) { dxilutil::EmitErrorOnFunction( HLM.GetModule()->getContext(), func, Twine("Signature element ") + semanticStr + Twine( ", referred to by patch constant 
function, is not found in " "corresponding hull shader ") + (sigKind == DXIL::SignatureKind::Input ? "input." : "output.")); return; } m_patchConstantInputsSigMap[arg.getArgNo()] = pSE; } else { std::unique_ptr<DxilSignatureElement> SE = pSig->CreateElement(); pSE = SE.get(); pSig->AppendElement(std::move(SE)); pSE->SetSigPointKind(sigPoint->GetKind()); pSE->Initialize(semanticStr, EltTy, interpMode, rows, cols, Semantic::kUndefinedRow, Semantic::kUndefinedCol, pSE->GetID(), paramAnnotation.GetSemanticIndexVec()); m_sigValueMap[pSE] = &arg; } } if (paramAnnotation.IsPrecise()) m_preciseSigSet.insert(pSE); if (sigKind == DXIL::SignatureKind::Output && pSemantic->GetKind() == Semantic::Kind::Position && hasClipPlane) { GenerateClipPlanesForVS(&arg); hasClipPlane = false; } // Set Output Stream. if (streamIdx > 0) pSE->SetOutputStream(streamIdx); }
void HLSignatureLower::CreateDxilSignatures() { DxilFunctionProps &props = HLM.GetDxilFunctionProps(Entry); const ShaderModel *pSM = HLM.GetShaderModel(); DXASSERT(Entry->getReturnType()->isVoidTy(), "Should have been changed in SROA_Parameter_HLSL"); DxilFunctionAnnotation *EntryAnnotation = HLM.GetFunctionAnnotation(Entry); DXASSERT(EntryAnnotation, "must have function annotation for entry function"); bool bHasClipPlane = props.shaderKind == DXIL::ShaderKind::Vertex ? HasClipPlanes() : false; const bool isPatchConstantFunctionFalse = false; const bool bForceOutFalse = false; for (Argument &arg : Entry->getArgumentList()) { Type *Ty = arg.getType(); // Skip streamout obj. if (HLModule::IsStreamOutputPtrType(Ty)) continue; // Skip OutIndices and InPayload DxilParameterAnnotation &paramAnnotation = EntryAnnotation->GetParameterAnnotation(arg.getArgNo()); hlsl::DxilParamInputQual qual = paramAnnotation.GetParamInputQual(); if (qual == hlsl::DxilParamInputQual::OutIndices || qual == hlsl::DxilParamInputQual::InPayload) continue; ProcessArgument(Entry, EntryAnnotation, arg, props, pSM, isPatchConstantFunctionFalse, bForceOutFalse, bHasClipPlane); } if (bHasClipPlane) { dxilutil::EmitErrorOnFunction(HLM.GetModule()->getContext(), Entry, "Cannot use clipplanes attribute without " "specifying a 4-component SV_Position " "output"); } m_OtherSemanticsUsed.clear(); if (props.shaderKind == DXIL::ShaderKind::Hull) { Function *patchConstantFunc = props.ShaderProps.HS.patchConstantFunc; if (patchConstantFunc == nullptr) { llvm_unreachable("Patch constant function is not specified."); } DxilFunctionAnnotation *patchFuncAnnotation = HLM.GetFunctionAnnotation(patchConstantFunc); DXASSERT(patchFuncAnnotation, "must have function annotation for patch constant function"); const bool isPatchConstantFunctionTrue = true; for (Argument &arg : patchConstantFunc->getArgumentList()) { ProcessArgument(patchConstantFunc, patchFuncAnnotation, arg, props, pSM, isPatchConstantFunctionTrue, bForceOutFalse, bHasClipPlane); } } }
// Allocate input/output slots void HLSignatureLower::AllocateDxilInputOutputs() { DxilFunctionProps &props = HLM.GetDxilFunctionProps(Entry); const ShaderModel *pSM = HLM.GetShaderModel(); const HLOptions &opts = HLM.GetHLOptions(); DXASSERT_NOMSG(opts.PackingStrategy < (unsigned)DXIL::PackingStrategy::Invalid); DXIL::PackingStrategy packing = (DXIL::PackingStrategy)opts.PackingStrategy; if (packing == DXIL::PackingStrategy::Default) packing = pSM->GetDefaultPackingStrategy(); hlsl::PackDxilSignature(EntrySig.InputSignature, packing); if (!EntrySig.InputSignature.IsFullyAllocated()) { llvm_unreachable( "Failed to allocate all input signature elements in available space."); } if (props.shaderKind != DXIL::ShaderKind::Amplification) { hlsl::PackDxilSignature(EntrySig.OutputSignature, packing); if (!EntrySig.OutputSignature.IsFullyAllocated()) { llvm_unreachable("Failed to allocate all output signature elements in " "available space."); } } if (props.shaderKind == DXIL::ShaderKind::Hull || props.shaderKind == DXIL::ShaderKind::Domain || props.shaderKind == DXIL::ShaderKind::Mesh) { hlsl::PackDxilSignature(EntrySig.PatchConstOrPrimSignature, packing); if (!EntrySig.PatchConstOrPrimSignature.IsFullyAllocated()) { llvm_unreachable("Failed to allocate all patch constant signature " "elements in available space."); } } }
namespace { // Helper functions and class for lowering signatures. void GenerateStOutput(Function *stOutput, MutableArrayRef<Value *> args, IRBuilder<> &Builder, bool cast) { if (cast) { Value *value = args[DXIL::OperandIndex::kStoreOutputValOpIdx]; args[DXIL::OperandIndex::kStoreOutputValOpIdx] = Builder.CreateZExt(value, Builder.getInt32Ty()); } Builder.CreateCall(stOutput, args); } void replaceStWithStOutput(Function *stOutput, StoreInst *stInst, Constant *OpArg, Constant *outputID, Value *idx, unsigned cols, Value *vertexOrPrimID, bool bI1Cast) { IRBuilder<> Builder(stInst); Value *val = stInst->getValueOperand(); if (VectorType *VT = dyn_cast<VectorType>(val->getType())) { DXASSERT_LOCALVAR(VT, cols == VT->getNumElements(), "vec size must match"); for (unsigned col = 0; col < cols; col++) { Value *subVal = Builder.CreateExtractElement(val, col); Value *colIdx = Builder.getInt8(col); SmallVector<Value *, 4> args = {OpArg, outputID, idx, colIdx, subVal}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); GenerateStOutput(stOutput, args, Builder, bI1Cast); } // remove stInst stInst->eraseFromParent(); } else if (!val->getType()->isArrayTy()) { // TODO: support the case where cols is not 1 DXASSERT(cols == 1, "only support scalar here"); Value *colIdx = Builder.getInt8(0); SmallVector<Value *, 4> args = {OpArg, outputID, idx, colIdx, val}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); GenerateStOutput(stOutput, args, Builder, bI1Cast); // remove stInst stInst->eraseFromParent(); } else { DXASSERT(0, "arrays not supported yet"); // TODO: support array. Value *colIdx = Builder.getInt8(0); ArrayType *AT = cast<ArrayType>(val->getType()); Value *args[] = {OpArg, outputID, idx, colIdx, /*val*/ nullptr}; (void)args; (void)AT; } } Value *GenerateLdInput(Function *loadInput, ArrayRef<Value *> args, IRBuilder<> &Builder, Value *zero, bool bCast, Type *Ty) { Value *input = Builder.CreateCall(loadInput, args); if (!bCast) return input; else { Value *bVal = Builder.CreateICmpNE(input, zero); IntegerType *IT = cast<IntegerType>(Ty); if (IT->getBitWidth() == 1) return bVal; else return Builder.CreateZExt(bVal, Ty); } } Value *replaceLdWithLdInput(Function *loadInput, LoadInst *ldInst, unsigned cols, MutableArrayRef<Value *> args, bool bCast) { IRBuilder<> Builder(ldInst); IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(ldInst)); Type *Ty = ldInst->getType(); Type *EltTy = Ty->getScalarType(); // Change i1 to i32 for load input.
Value *zero = Builder.getInt32(0); if (VectorType *VT = dyn_cast<VectorType>(Ty)) { Value *newVec = llvm::UndefValue::get(VT); DXASSERT(cols == VT->getNumElements(), "vec size must match"); for (unsigned col = 0; col < cols; col++) { Value *colIdx = Builder.getInt8(col); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); newVec = Builder.CreateInsertElement(newVec, input, col); } ldInst->replaceAllUsesWith(newVec); ldInst->eraseFromParent(); return newVec; } else { Value *colIdx = args[DXIL::OperandIndex::kLoadInputColOpIdx]; if (colIdx == nullptr) { DXASSERT(cols == 1, "only support scalar here"); colIdx = Builder.getInt8(0); } else { if (colIdx->getType() == Builder.getInt32Ty()) { colIdx = Builder.CreateTrunc(colIdx, Builder.getInt8Ty()); } } if (isa<ConstantInt>(colIdx)) { args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); ldInst->replaceAllUsesWith(input); ldInst->eraseFromParent(); return input; } else { // Vector indexing. // Load to array. ArrayType *AT = ArrayType::get(ldInst->getType(), cols); Value *arrayVec = AllocaBuilder.CreateAlloca(AT); Value *zeroIdx = Builder.getInt32(0); for (unsigned col = 0; col < cols; col++) { Value *colIdx = Builder.getInt8(col); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); Value *GEP = Builder.CreateInBoundsGEP(arrayVec, {zeroIdx, colIdx}); Builder.CreateStore(input, GEP); } Value *vecIndexingPtr = Builder.CreateInBoundsGEP(arrayVec, {zeroIdx, colIdx}); Value *input = Builder.CreateLoad(vecIndexingPtr); ldInst->replaceAllUsesWith(input); ldInst->eraseFromParent(); return input; } } } void replaceMatStWithStOutputs(CallInst *CI, HLMatLoadStoreOpcode matOp, Function *ldStFunc, Constant *OpArg, Constant *ID, Constant *columnConsts[], Value *vertexOrPrimID, Value *idxVal) { IRBuilder<> LocalBuilder(CI); Value *Val = CI->getArgOperand(HLOperandIndex::kMatStoreValOpIdx); HLMatrixType MatTy = HLMatrixType::cast(CI->getArgOperand(HLOperandIndex::kMatStoreDstPtrOpIdx) ->getType() ->getPointerElementType()); Val = MatTy.emitLoweredRegToMem(Val, LocalBuilder); if (matOp == HLMatLoadStoreOpcode::ColMatStore) { for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { Constant *constColIdx = LocalBuilder.getInt32(c); Value *colIdx = LocalBuilder.CreateAdd(idxVal, constColIdx); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { unsigned matIdx = MatTy.getColumnMajorIndex(r, c); Value *Elt = LocalBuilder.CreateExtractElement(Val, matIdx); SmallVector<Value *, 6> argList = {OpArg, ID, colIdx, columnConsts[r], Elt}; if (vertexOrPrimID) argList.emplace_back(vertexOrPrimID); LocalBuilder.CreateCall(ldStFunc, argList); } } } else { for (unsigned r = 0; r < MatTy.getNumRows(); r++) { Constant *constRowIdx = LocalBuilder.getInt32(r); Value *rowIdx = LocalBuilder.CreateAdd(idxVal, constRowIdx); for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { unsigned matIdx = MatTy.getRowMajorIndex(r, c); Value *Elt = LocalBuilder.CreateExtractElement(Val, matIdx); SmallVector<Value *, 6> argList = {OpArg, ID, rowIdx, columnConsts[c], Elt}; if (vertexOrPrimID) argList.emplace_back(vertexOrPrimID); LocalBuilder.CreateCall(ldStFunc, argList); } } } CI->eraseFromParent(); } void replaceMatLdWithLdInputs(CallInst *CI, HLMatLoadStoreOpcode matOp, Function *ldStFunc, Constant *OpArg, Constant *ID, Constant *columnConsts[], Value 
*vertexOrPrimID, Value *idxVal) { IRBuilder<> LocalBuilder(CI); HLMatrixType MatTy = HLMatrixType::cast(CI->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx) ->getType() ->getPointerElementType()); std::vector<Value *> matElts(MatTy.getNumElements()); if (matOp == HLMatLoadStoreOpcode::ColMatLoad) { for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { Constant *constRowIdx = LocalBuilder.getInt32(c); Value *rowIdx = LocalBuilder.CreateAdd(idxVal, constRowIdx); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { SmallVector<Value *, 4> args = {OpArg, ID, rowIdx, columnConsts[r]}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); Value *input = LocalBuilder.CreateCall(ldStFunc, args); unsigned matIdx = MatTy.getColumnMajorIndex(r, c); matElts[matIdx] = input; } } } else { for (unsigned r = 0; r < MatTy.getNumRows(); r++) { Constant *constRowIdx = LocalBuilder.getInt32(r); Value *rowIdx = LocalBuilder.CreateAdd(idxVal, constRowIdx); for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { SmallVector<Value *, 4> args = {OpArg, ID, rowIdx, columnConsts[c]}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); Value *input = LocalBuilder.CreateCall(ldStFunc, args); unsigned matIdx = MatTy.getRowMajorIndex(r, c); matElts[matIdx] = input; } } } Value *newVec = HLMatrixLower::BuildVector(matElts[0]->getType(), matElts, LocalBuilder); newVec = MatTy.emitLoweredMemToReg(newVec, LocalBuilder); CI->replaceAllUsesWith(newVec); CI->eraseFromParent(); }
void replaceDirectInputParameter(Value *param, Function *loadInput, unsigned cols, MutableArrayRef<Value *> args, bool bCast, OP *hlslOP, IRBuilder<> &Builder) { Value *zero = hlslOP->GetU32Const(0); Type *Ty = param->getType(); Type *EltTy = Ty->getScalarType(); if (VectorType *VT = dyn_cast<VectorType>(Ty)) { Value *newVec = llvm::UndefValue::get(VT); DXASSERT(cols == VT->getNumElements(), "vec size must match"); for (unsigned col = 0; col < cols; col++) { Value *colIdx = hlslOP->GetU8Const(col); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); newVec = Builder.CreateInsertElement(newVec, input, col); } param->replaceAllUsesWith(newVec); // The individual loadInputs are the authoritative source of values for the vector. dxilutil::TryScatterDebugValueToVectorElements(newVec); } else if (!Ty->isArrayTy() && !HLMatrixType::isa(Ty)) { DXASSERT(cols == 1, "only support scalar here"); Value *colIdx = hlslOP->GetU8Const(0); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); param->replaceAllUsesWith(input); // Will properly relocate any DbgValueInst } else if (HLMatrixType::isa(Ty)) { if (param->use_empty()) return; DXASSERT(param->hasOneUse(), "matrix arg should only have one use, the matrix-to-vec cast"); CallInst *CI = cast<CallInst>(param->user_back()); HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); DXASSERT_LOCALVAR(group, group == HLOpcodeGroup::HLCast, "must be hlcast here"); unsigned opcode = GetHLOpcode(CI); HLCastOpcode matOp = static_cast<HLCastOpcode>(opcode); switch (matOp) { case HLCastOpcode::ColMatrixToVecCast: { IRBuilder<> LocalBuilder(CI); HLMatrixType MatTy = HLMatrixType::cast( CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx)->getType()); Type *EltTy = MatTy.getElementTypeForReg(); std::vector<Value *> matElts(MatTy.getNumElements()); for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { Value *rowIdx = hlslOP->GetI32Const(c); args[DXIL::OperandIndex::kLoadInputRowOpIdx] = rowIdx; for (unsigned r = 0; r < MatTy.getNumRows(); r++) { Value *colIdx = hlslOP->GetU8Const(r); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); matElts[MatTy.getColumnMajorIndex(r, c)] = input; } } Value *newVec = HLMatrixLower::BuildVector(EltTy, matElts, LocalBuilder); CI->replaceAllUsesWith(newVec); CI->eraseFromParent(); } break; case HLCastOpcode::RowMatrixToVecCast: { IRBuilder<> LocalBuilder(CI); HLMatrixType MatTy = HLMatrixType::cast( CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx)->getType()); Type *EltTy = MatTy.getElementTypeForReg(); std::vector<Value *> matElts(MatTy.getNumElements()); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { Value *rowIdx = hlslOP->GetI32Const(r); args[DXIL::OperandIndex::kLoadInputRowOpIdx] = rowIdx; for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { Value *colIdx = hlslOP->GetU8Const(c); args[DXIL::OperandIndex::kLoadInputColOpIdx] = colIdx; Value *input = GenerateLdInput(loadInput, args, Builder, zero, bCast, EltTy); matElts[MatTy.getRowMajorIndex(r, c)] = input; } } Value *newVec = HLMatrixLower::BuildVector(EltTy, matElts, LocalBuilder); CI->replaceAllUsesWith(newVec); CI->eraseFromParent(); } break; default: // Only matrix to vector casts are valid. break; } } else { DXASSERT(0, "invalid type for direct input"); } }
struct InputOutputAccessInfo { // For input/output that has only 1 row, idx is 0. Value *idx; // VertexID for HS/DS/GS input, MS vertex output. PrimitiveID for MS primitive output. Value *vertexOrPrimID; // Vector index. Value *vectorIdx; // Load/Store/LoadMat/StoreMat on input/output. Instruction *user; InputOutputAccessInfo(Value *index, Instruction *I) : idx(index), vertexOrPrimID(nullptr), vectorIdx(nullptr), user(I) {} InputOutputAccessInfo(Value *index, Instruction *I, Value *ID, Value *vecIdx) : idx(index), vertexOrPrimID(ID), vectorIdx(vecIdx), user(I) {} };
void collectInputOutputAccessInfo( Value *GV, Constant *constZero, std::vector<InputOutputAccessInfo> &accessInfoList, bool hasVertexOrPrimID, bool bInput, bool bRowMajor, bool isMS) { // Merge GEP uses for input/output. dxilutil::MergeGepUse(GV); for (auto User = GV->user_begin(); User != GV->user_end();) { Value *I = *(User++); if (LoadInst *ldInst = dyn_cast<LoadInst>(I)) { if (bInput) { InputOutputAccessInfo info = {constZero, ldInst}; accessInfoList.push_back(info); } } else if (StoreInst *stInst = dyn_cast<StoreInst>(I)) { if (!bInput) { InputOutputAccessInfo info = {constZero, stInst}; accessInfoList.push_back(info); } } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { // Vector indexing may have more indices. // Vector indexing changed to array indexing in SROA_HLSL. auto idx = GEP->idx_begin(); DXASSERT_LOCALVAR(idx, idx->get() == constZero, "only support 0 offset for input pointer"); Value *vertexOrPrimID = nullptr; Value *vectorIdx = nullptr; gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); // Skip first pointer idx which must be 0. GEPIt++; if (hasVertexOrPrimID) { // Save vertexOrPrimID. vertexOrPrimID = GEPIt.getOperand(); GEPIt++; } // Start from first index. Value *rowIdx = GEPIt.getOperand(); if (GEPIt != E) { if ((*GEPIt)->isVectorTy()) { // Vector indexing. rowIdx = constZero; vectorIdx = GEPIt.getOperand(); DXASSERT_NOMSG((++GEPIt) == E); } else { // Array which may have vector indexing. // The highest dim index is saved in rowIdx; the array size of the highest dim does not affect the index. GEPIt++; IRBuilder<> Builder(GEP); Type *idxTy = rowIdx->getType(); for (; GEPIt != E; ++GEPIt) { DXASSERT(!GEPIt->isStructTy(), "Struct should be flattened by SROA_Parameter_HLSL"); DXASSERT(!GEPIt->isPointerTy(), "pointer type in the middle of a GEP is not supported"); if (GEPIt->isArrayTy()) { Constant *arraySize = ConstantInt::get(idxTy, GEPIt->getArrayNumElements()); rowIdx = Builder.CreateMul(rowIdx, arraySize); rowIdx = Builder.CreateAdd(rowIdx, GEPIt.getOperand()); } else { Type *Ty = *GEPIt; DXASSERT_LOCALVAR(Ty, Ty->isVectorTy(), "must be vector type here to index"); // Save vector idx. vectorIdx = GEPIt.getOperand(); } } if (HLMatrixType MatTy = HLMatrixType::dyn_cast(*GEPIt)) { Constant *arraySize = ConstantInt::get(idxTy, MatTy.getNumColumns()); if (bRowMajor) { arraySize = ConstantInt::get(idxTy, MatTy.getNumRows()); } rowIdx = Builder.CreateMul(rowIdx, arraySize); } } } else rowIdx = constZero; auto GepUser = GEP->user_begin(); auto GepUserE = GEP->user_end(); Value *idxVal = rowIdx; for (; GepUser != GepUserE;) { auto GepUserIt = GepUser++; if (LoadInst *ldInst = dyn_cast<LoadInst>(*GepUserIt)) { if (bInput) { InputOutputAccessInfo info = {idxVal, ldInst, vertexOrPrimID, vectorIdx}; accessInfoList.push_back(info); } } else if (StoreInst *stInst = dyn_cast<StoreInst>(*GepUserIt)) { if (!bInput) { InputOutputAccessInfo info = {idxVal, stInst, vertexOrPrimID, vectorIdx}; accessInfoList.push_back(info); } } else if (CallInst *CI = dyn_cast<CallInst>(*GepUserIt)) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); DXASSERT_LOCALVAR(group, group == HLOpcodeGroup::HLMatLoadStore, "input/output should only be used by ld/st"); HLMatLoadStoreOpcode opcode = (HLMatLoadStoreOpcode)GetHLOpcode(CI); if ((opcode == HLMatLoadStoreOpcode::ColMatLoad || opcode == HLMatLoadStoreOpcode::RowMatLoad) ? bInput : !bInput) { InputOutputAccessInfo info = {idxVal, CI, vertexOrPrimID, vectorIdx}; accessInfoList.push_back(info); } } else { DXASSERT(0, "input/output should only be used by ld/st"); } } } else if (CallInst *CI = dyn_cast<CallInst>(I)) { InputOutputAccessInfo info = {constZero, CI}; accessInfoList.push_back(info); } else { DXASSERT(0, "input/output should only be used by ld/st"); } } }
void GenerateInputOutputUserCall(InputOutputAccessInfo &info, Value *undefVertexIdx, Function *ldStFunc, Constant *OpArg, Constant *ID, unsigned cols, bool bI1Cast, Constant *columnConsts[], bool bNeedVertexOrPrimID, bool isArrayTy, bool bInput, bool bIsInout) { Value *idxVal = info.idx; Value *vertexOrPrimID = undefVertexIdx; if (bNeedVertexOrPrimID && isArrayTy) { vertexOrPrimID = info.vertexOrPrimID; } if (LoadInst *ldInst = dyn_cast<LoadInst>(info.user)) { SmallVector<Value *, 4> args = {OpArg, ID, idxVal, info.vectorIdx}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); replaceLdWithLdInput(ldStFunc, ldInst, cols, args, bI1Cast); } else if (StoreInst *stInst = dyn_cast<StoreInst>(info.user)) { if (bInput) { DXASSERT_LOCALVAR(bIsInout, bIsInout, "input should not have store use."); } else { if (!info.vectorIdx) { replaceStWithStOutput(ldStFunc, stInst, OpArg, ID, idxVal, cols, vertexOrPrimID, bI1Cast); } else { Value *V = stInst->getValueOperand(); Type *Ty = V->getType(); DXASSERT_LOCALVAR(Ty == Ty->getScalarType() && !Ty->isAggregateType(), Ty, "only support scalar here"); if (ConstantInt *ColIdx = dyn_cast<ConstantInt>(info.vectorIdx)) { IRBuilder<> Builder(stInst); if (ColIdx->getType()->getBitWidth() != 8) { ColIdx = Builder.getInt8(ColIdx->getValue().getLimitedValue()); } SmallVector<Value *, 6> args = {OpArg, ID, idxVal, ColIdx, V}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); GenerateStOutput(ldStFunc, args, Builder, bI1Cast); } else { BasicBlock *BB = stInst->getParent(); BasicBlock *EndBB = BB->splitBasicBlock(stInst); TerminatorInst *TI = BB->getTerminator(); IRBuilder<> SwitchBuilder(TI); LLVMContext &Ctx = stInst->getContext(); SwitchInst *Switch = SwitchBuilder.CreateSwitch(info.vectorIdx, EndBB, cols); TI->eraseFromParent(); Function *F = EndBB->getParent(); for (unsigned i = 0; i < cols; i++) { BasicBlock *CaseBB = BasicBlock::Create(Ctx, "case", F, EndBB); Switch->addCase(SwitchBuilder.getInt32(i), CaseBB); IRBuilder<> CaseBuilder(CaseBB); ConstantInt *CaseIdx = SwitchBuilder.getInt8(i); SmallVector<Value *, 6> args = {OpArg, ID, idxVal, CaseIdx, V}; if (vertexOrPrimID) args.emplace_back(vertexOrPrimID); GenerateStOutput(ldStFunc, args, CaseBuilder, bI1Cast); CaseBuilder.CreateBr(EndBB); } } // remove stInst stInst->eraseFromParent(); } } } else if (CallInst *CI = dyn_cast<CallInst>(info.user)) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); // Intrinsic will be translated later.
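// HLIntrinsic users (e.g. GS stream Append/RestartStrip) and non-HL calls
// have their own lowering paths, so they are deliberately skipped here.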
if (group == HLOpcodeGroup::HLIntrinsic || group == HLOpcodeGroup::NotHL) return; unsigned opcode = GetHLOpcode(CI); DXASSERT_NOMSG(group == HLOpcodeGroup::HLMatLoadStore); HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); switch (matOp) { case HLMatLoadStoreOpcode::ColMatLoad: case HLMatLoadStoreOpcode::RowMatLoad: { replaceMatLdWithLdInputs(CI, matOp, ldStFunc, OpArg, ID, columnConsts, vertexOrPrimID, idxVal); } break; case HLMatLoadStoreOpcode::ColMatStore: case HLMatLoadStoreOpcode::RowMatStore: { replaceMatStWithStOutputs(CI, matOp, ldStFunc, OpArg, ID, columnConsts, vertexOrPrimID, idxVal); } break; } } else { DXASSERT(0, "invalid operation on input output"); } } } // namespace void HLSignatureLower::GenerateDxilInputs() { GenerateDxilInputsOutputs(DXIL::SignatureKind::Input); } void HLSignatureLower::GenerateDxilOutputs() { GenerateDxilInputsOutputs(DXIL::SignatureKind::Output); } void HLSignatureLower::GenerateDxilPrimOutputs() { GenerateDxilInputsOutputs(DXIL::SignatureKind::PatchConstOrPrim); } void HLSignatureLower::GenerateDxilInputsOutputs(DXIL::SignatureKind SK) { OP *hlslOP = HLM.GetOP(); DxilFunctionProps &props = HLM.GetDxilFunctionProps(Entry); Module &M = *(HLM.GetModule()); OP::OpCode opcode = (OP::OpCode)-1; switch (SK) { case DXIL::SignatureKind::Input: opcode = OP::OpCode::LoadInput; break; case DXIL::SignatureKind::Output: opcode = props.IsMS() ? OP::OpCode::StoreVertexOutput : OP::OpCode::StoreOutput; break; case DXIL::SignatureKind::PatchConstOrPrim: opcode = OP::OpCode::StorePrimitiveOutput; break; default: DXASSERT_NOMSG(0); } bool bInput = SK == DXIL::SignatureKind::Input; bool bNeedVertexOrPrimID = bInput && (props.IsGS() || props.IsDS() || props.IsHS()); bNeedVertexOrPrimID |= !bInput && props.IsMS(); Constant *OpArg = hlslOP->GetU32Const((unsigned)opcode); Constant *columnConsts[] = { hlslOP->GetU8Const(0), hlslOP->GetU8Const(1), hlslOP->GetU8Const(2), hlslOP->GetU8Const(3), hlslOP->GetU8Const(4), hlslOP->GetU8Const(5), hlslOP->GetU8Const(6), hlslOP->GetU8Const(7), hlslOP->GetU8Const(8), hlslOP->GetU8Const(9), hlslOP->GetU8Const(10), hlslOP->GetU8Const(11), hlslOP->GetU8Const(12), hlslOP->GetU8Const(13), hlslOP->GetU8Const(14), hlslOP->GetU8Const(15)}; Constant *constZero = hlslOP->GetU32Const(0); Value *undefVertexIdx = props.IsMS() || !bInput ? nullptr : UndefValue::get(Type::getInt32Ty(HLM.GetCtx())); DxilSignature &Sig = bInput ? EntrySig.InputSignature : SK == DXIL::SignatureKind::Output ? EntrySig.OutputSignature : EntrySig.PatchConstOrPrimSignature; DxilTypeSystem &typeSys = HLM.GetTypeSystem(); DxilFunctionAnnotation *pFuncAnnot = typeSys.GetFunctionAnnotation(Entry); Type *i1Ty = Type::getInt1Ty(constZero->getContext()); Type *i32Ty = constZero->getType(); llvm::SmallVector<unsigned, 8> removeIndices; for (unsigned i = 0; i < Sig.GetElements().size(); i++) { DxilSignatureElement *SE = &Sig.GetElement(i); llvm::Type *Ty = SE->GetCompType().GetLLVMType(HLM.GetCtx()); // Cast i1 to i32 for load input. 
bool bI1Cast = false; if (Ty == i1Ty) { bI1Cast = true; Ty = i32Ty; } if (!hlslOP->IsOverloadLegal(opcode, Ty)) { std::string O; raw_string_ostream OSS(O); Ty->print(OSS); OSS << "(type for " << SE->GetName() << ")"; OSS << " cannot be used as shader inputs or outputs."; OSS.flush(); dxilutil::EmitErrorOnFunction(M.getContext(), Entry, O); continue; } Function *dxilFunc = hlslOP->GetOpFunc(opcode, Ty); Constant *ID = hlslOP->GetU32Const(i); unsigned cols = SE->GetCols(); Value *GV = m_sigValueMap[SE]; bool bIsInout = m_inoutArgSet.count(GV) > 0; IRBuilder<> EntryBuilder(Entry->getEntryBlock().getFirstInsertionPt()); if (DbgDeclareInst *DDI = llvm::FindAllocaDbgDeclare(GV)) { EntryBuilder.SetCurrentDebugLocation(DDI->getDebugLoc()); } DXIL::SemanticInterpretationKind SI = SE->GetInterpretation(); DXASSERT_NOMSG(SI < DXIL::SemanticInterpretationKind::Invalid); DXASSERT_NOMSG(SI != DXIL::SemanticInterpretationKind::NA); DXASSERT_NOMSG(SI != DXIL::SemanticInterpretationKind::NotInSig); if (SI == DXIL::SemanticInterpretationKind::Shadow) continue; // Handled in ProcessArgument if (!GV->getType()->isPointerTy()) { DXASSERT(bInput, "direct parameter must be input"); Value *vertexOrPrimID = undefVertexIdx; Value *args[] = {OpArg, ID, /*rowIdx*/ constZero, /*colIdx*/ nullptr, vertexOrPrimID}; replaceDirectInputParameter(GV, dxilFunc, cols, args, bI1Cast, hlslOP, EntryBuilder); continue; } bool bIsArrayTy = GV->getType()->getPointerElementType()->isArrayTy(); bool bIsPrecise = m_preciseSigSet.count(SE); if (bIsPrecise) HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(GV, M); bool bRowMajor = false; if (Argument *Arg = dyn_cast<Argument>(GV)) { if (pFuncAnnot) { auto &paramAnnot = pFuncAnnot->GetParameterAnnotation(Arg->getArgNo()); if (paramAnnot.HasMatrixAnnotation()) bRowMajor = paramAnnot.GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; } } std::vector<InputOutputAccessInfo> accessInfoList; collectInputOutputAccessInfo(GV, constZero, accessInfoList, bNeedVertexOrPrimID && bIsArrayTy, bInput, bRowMajor, props.IsMS()); for (InputOutputAccessInfo &info : accessInfoList) { GenerateInputOutputUserCall( info, undefVertexIdx, dxilFunc, OpArg, ID, cols, bI1Cast, columnConsts, bNeedVertexOrPrimID, bIsArrayTy, bInput, bIsInout); } } } void HLSignatureLower::GenerateDxilComputeAndNodeCommonInputs() { OP *hlslOP = HLM.GetOP(); DxilFunctionAnnotation *funcAnnotation = HLM.GetFunctionAnnotation(Entry); DXASSERT(funcAnnotation, "must find annotation for entry function"); auto &funcProps = HLM.GetDxilFunctionProps(Entry); IRBuilder<> Builder(Entry->getEntryBlock().getFirstInsertionPt()); for (Argument &arg : Entry->args()) { DxilParameterAnnotation &paramAnnotation = funcAnnotation->GetParameterAnnotation(arg.getArgNo()); llvm::StringRef semanticStr = paramAnnotation.GetSemanticString(); if (semanticStr.empty()) { if (funcProps.IsNode() && paramAnnotation.IsParamInputQualNode()) continue; dxilutil::EmitErrorOnFunction(HLM.GetModule()->getContext(), Entry, "Semantic must be defined for all " "parameters of an entry function or patch " "constant function."); return; } const Semantic *semantic = Semantic::GetByName(semanticStr, DXIL::SigPointKind::CSIn); OP::OpCode opcode; switch (semantic->GetKind()) { case Semantic::Kind::GroupThreadID: opcode = OP::OpCode::ThreadIdInGroup; break; case Semantic::Kind::GroupID: opcode = OP::OpCode::GroupId; break; case Semantic::Kind::DispatchThreadID: opcode = OP::OpCode::ThreadId; break; case Semantic::Kind::GroupIndex: opcode = 
OP::OpCode::FlattenedThreadIdInGroup; break; default: DXASSERT(semantic->IsInvalid(), "otherwise compute shader semantics are out of date"); dxilutil::EmitErrorOnFunction(HLM.GetModule()->getContext(), Entry, "invalid semantic found in CS"); return; } Constant *OpArg = hlslOP->GetU32Const((unsigned)opcode); Type *NumTy = arg.getType(); DXASSERT(!NumTy->isPointerTy(), "Unexpected byref value for CS SV_***ID semantic."); DXASSERT(NumTy->getScalarType()->isIntegerTy(), "Unexpected non-integer value for CS SV_***ID semantic."); // Always use the i32 overload of those intrinsics, and then cast as needed Function *dxilFunc = hlslOP->GetOpFunc(opcode, Builder.getInt32Ty()); Value *newArg = nullptr; if (opcode == OP::OpCode::FlattenedThreadIdInGroup) { newArg = Builder.CreateCall(dxilFunc, {OpArg}); } else { unsigned vecSize = 1; if (FixedVectorType *VT = dyn_cast<FixedVectorType>(NumTy)) vecSize = VT->getNumElements(); newArg = Builder.CreateCall(dxilFunc, {OpArg, hlslOP->GetU32Const(0)}); if (vecSize > 1) { Value *result = UndefValue::get(VectorType::get(Builder.getInt32Ty(), vecSize)); result = Builder.CreateInsertElement(result, newArg, (uint64_t)0); for (unsigned i = 1; i < vecSize; i++) { Value *newElt = Builder.CreateCall(dxilFunc, {OpArg, hlslOP->GetU32Const(i)}); result = Builder.CreateInsertElement(result, newElt, i); } newArg = result; } } // If the argument is of non-i32 type, convert here if (newArg->getType() != NumTy) newArg = Builder.CreateZExtOrTrunc(newArg, NumTy); if (newArg->getType() != arg.getType()) { DXASSERT_NOMSG(arg.getType()->isPointerTy()); for (User *U : arg.users()) { LoadInst *LI = cast<LoadInst>(U); LI->replaceAllUsesWith(newArg); } } else { arg.replaceAllUsesWith(newArg); } } }
void HLSignatureLower::GenerateDxilPatchConstantLdSt() { OP *hlslOP = HLM.GetOP(); DxilFunctionProps &props = HLM.GetDxilFunctionProps(Entry); Module &M = *(HLM.GetModule()); Constant *constZero = hlslOP->GetU32Const(0); DxilSignature &Sig = EntrySig.PatchConstOrPrimSignature; DxilTypeSystem &typeSys = HLM.GetTypeSystem(); DxilFunctionAnnotation *pFuncAnnot = typeSys.GetFunctionAnnotation(Entry); auto InsertPt = Entry->getEntryBlock().getFirstInsertionPt(); const bool bIsHs = props.IsHS(); const bool bIsInput = !bIsHs; const bool bIsInout = false; const bool bNeedVertexOrPrimID = false; if (bIsHs) { DxilFunctionProps &EntryQual = HLM.GetDxilFunctionProps(Entry); Function *patchConstantFunc = EntryQual.ShaderProps.HS.patchConstantFunc; InsertPt = patchConstantFunc->getEntryBlock().getFirstInsertionPt(); pFuncAnnot = typeSys.GetFunctionAnnotation(patchConstantFunc); } IRBuilder<> Builder(InsertPt); Type *i1Ty = Builder.getInt1Ty(); Type *i32Ty = Builder.getInt32Ty(); // LoadPatchConstant doesn't have a vertexIdx operand. Value *undefVertexIdx = nullptr; Constant *columnConsts[] = { hlslOP->GetU8Const(0), hlslOP->GetU8Const(1), hlslOP->GetU8Const(2), hlslOP->GetU8Const(3), hlslOP->GetU8Const(4), hlslOP->GetU8Const(5), hlslOP->GetU8Const(6), hlslOP->GetU8Const(7), hlslOP->GetU8Const(8), hlslOP->GetU8Const(9), hlslOP->GetU8Const(10), hlslOP->GetU8Const(11), hlslOP->GetU8Const(12), hlslOP->GetU8Const(13), hlslOP->GetU8Const(14), hlslOP->GetU8Const(15)}; OP::OpCode opcode = bIsInput ? OP::OpCode::LoadPatchConstant : OP::OpCode::StorePatchConstant; Constant *OpArg = hlslOP->GetU32Const((unsigned)opcode); for (unsigned i = 0; i < Sig.GetElements().size(); i++) { DxilSignatureElement *SE = &Sig.GetElement(i); Value *GV = m_sigValueMap[SE]; DXIL::SemanticInterpretationKind SI = SE->GetInterpretation(); DXASSERT_NOMSG(SI < DXIL::SemanticInterpretationKind::Invalid); DXASSERT_NOMSG(SI != DXIL::SemanticInterpretationKind::NA); DXASSERT_NOMSG(SI != DXIL::SemanticInterpretationKind::NotInSig); if (SI == DXIL::SemanticInterpretationKind::Shadow) continue; // Handled in ProcessArgument Constant *ID = hlslOP->GetU32Const(i); // Generate LoadPatchConstant. Type *Ty = SE->GetCompType().GetLLVMType(HLM.GetCtx()); // Cast i1 to i32 for load input. bool bI1Cast = false; if (Ty == i1Ty) { bI1Cast = true; Ty = i32Ty; } unsigned cols = SE->GetCols(); Function *dxilFunc = hlslOP->GetOpFunc(opcode, Ty); if (!GV->getType()->isPointerTy()) { DXASSERT(bIsInput, "Must be DS input."); Constant *OpArg = hlslOP->GetU32Const( static_cast<unsigned>(OP::OpCode::LoadPatchConstant)); Value *args[] = {OpArg, ID, /*rowIdx*/ constZero, /*colIdx*/ nullptr}; replaceDirectInputParameter(GV, dxilFunc, cols, args, bI1Cast, hlslOP, Builder); continue; } bool bRowMajor = false; if (Argument *Arg = dyn_cast<Argument>(GV)) { if (pFuncAnnot) { auto &paramAnnot = pFuncAnnot->GetParameterAnnotation(Arg->getArgNo()); if (paramAnnot.HasMatrixAnnotation()) bRowMajor = paramAnnot.GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; } } std::vector<InputOutputAccessInfo> accessInfoList; collectInputOutputAccessInfo(GV, constZero, accessInfoList, bNeedVertexOrPrimID, bIsInput, bRowMajor, false); bool bIsArrayTy = GV->getType()->getPointerElementType()->isArrayTy(); bool isPrecise = m_preciseSigSet.count(SE); if (isPrecise) HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(GV, M); for (InputOutputAccessInfo &info : accessInfoList) { GenerateInputOutputUserCall( info, undefVertexIdx, dxilFunc, OpArg, ID, cols, bI1Cast, columnConsts, bNeedVertexOrPrimID, bIsArrayTy, bIsInput, bIsInout); } } }
void HLSignatureLower::GenerateDxilPatchConstantFunctionInputs() { // Map InputPatch arguments to the input signature; use LoadOutputControlPoint for OutputPatch arguments.
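// InputPatch parameters read the hull shader's control-point inputs with
// LoadInput, while OutputPatch parameters read the control points written by
// the control-point phase with LoadOutputControlPoint (see the opcode
// selection below).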
OP *hlslOP = HLM.GetOP(); Constant *constZero = hlslOP->GetU32Const(0); DxilFunctionProps &EntryQual = HLM.GetDxilFunctionProps(Entry); Function *patchConstantFunc = EntryQual.ShaderProps.HS.patchConstantFunc; DxilFunctionAnnotation *patchFuncAnnotation = HLM.GetFunctionAnnotation(patchConstantFunc); DXASSERT(patchFuncAnnotation, "must find annotation for patch constant function"); Type *i1Ty = Type::getInt1Ty(constZero->getContext()); Type *i32Ty = constZero->getType(); Constant *columnConsts[] = { hlslOP->GetU8Const(0), hlslOP->GetU8Const(1), hlslOP->GetU8Const(2), hlslOP->GetU8Const(3), hlslOP->GetU8Const(4), hlslOP->GetU8Const(5), hlslOP->GetU8Const(6), hlslOP->GetU8Const(7), hlslOP->GetU8Const(8), hlslOP->GetU8Const(9), hlslOP->GetU8Const(10), hlslOP->GetU8Const(11), hlslOP->GetU8Const(12), hlslOP->GetU8Const(13), hlslOP->GetU8Const(14), hlslOP->GetU8Const(15)}; for (Argument &arg : patchConstantFunc->args()) { DxilParameterAnnotation &paramAnnotation = patchFuncAnnotation->GetParameterAnnotation(arg.getArgNo()); DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual(); if (inputQual == DxilParamInputQual::InputPatch || inputQual == DxilParamInputQual::OutputPatch) { DxilSignatureElement *SE = m_patchConstantInputsSigMap[arg.getArgNo()]; if (!SE) // Error should have been reported at an earlier stage. continue; Constant *inputID = hlslOP->GetU32Const(SE->GetID()); unsigned cols = SE->GetCols(); Type *Ty = SE->GetCompType().GetLLVMType(HLM.GetCtx()); // Cast i1 to i32 for load input. bool bI1Cast = false; if (Ty == i1Ty) { bI1Cast = true; Ty = i32Ty; } OP::OpCode opcode = inputQual == DxilParamInputQual::InputPatch ? OP::OpCode::LoadInput : OP::OpCode::LoadOutputControlPoint; Function *dxilLdFunc = hlslOP->GetOpFunc(opcode, Ty); bool bRowMajor = false; if (Argument *Arg = dyn_cast<Argument>(&arg)) { if (patchFuncAnnotation) { auto &paramAnnot = patchFuncAnnotation->GetParameterAnnotation(Arg->getArgNo()); if (paramAnnot.HasMatrixAnnotation()) bRowMajor = paramAnnot.GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; } } std::vector<InputOutputAccessInfo> accessInfoList; collectInputOutputAccessInfo(&arg, constZero, accessInfoList, /*hasVertexOrPrimID*/ true, true, bRowMajor, false); for (InputOutputAccessInfo &info : accessInfoList) { Constant *OpArg = hlslOP->GetU32Const((unsigned)opcode); if (LoadInst *ldInst = dyn_cast<LoadInst>(info.user)) { Value *args[] = {OpArg, inputID, info.idx, info.vectorIdx, info.vertexOrPrimID}; replaceLdWithLdInput(dxilLdFunc, ldInst, cols, args, bI1Cast); } else if (CallInst *CI = dyn_cast<CallInst>(info.user)) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); // Intrinsic will be translated later. 
if (group == HLOpcodeGroup::HLIntrinsic || group == HLOpcodeGroup::NotHL) return; unsigned opcode = GetHLOpcode(CI); DXASSERT_NOMSG(group == HLOpcodeGroup::HLMatLoadStore); HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); if (matOp == HLMatLoadStoreOpcode::ColMatLoad || matOp == HLMatLoadStoreOpcode::RowMatLoad) replaceMatLdWithLdInputs(CI, matOp, dxilLdFunc, OpArg, inputID, columnConsts, info.vertexOrPrimID, info.idx); } else { DXASSERT(0, "input should only be loaded"); } } } } }
bool HLSignatureLower::HasClipPlanes() { if (!HLM.HasDxilFunctionProps(Entry)) return false; DxilFunctionProps &EntryQual = HLM.GetDxilFunctionProps(Entry); auto &VS = EntryQual.ShaderProps.VS; unsigned numClipPlanes = 0; for (unsigned i = 0; i < DXIL::kNumClipPlanes; i++) { if (!VS.clipPlanes[i]) break; numClipPlanes++; } return numClipPlanes != 0; }
void HLSignatureLower::GenerateClipPlanesForVS(Value *outPosition) { DxilFunctionProps &EntryQual = HLM.GetDxilFunctionProps(Entry); auto &VS = EntryQual.ShaderProps.VS; unsigned numClipPlanes = 0; for (unsigned i = 0; i < DXIL::kNumClipPlanes; i++) { if (!VS.clipPlanes[i]) break; numClipPlanes++; } if (!numClipPlanes) return; LLVMContext &Ctx = HLM.GetCtx(); Function *dp4 = HLM.GetOP()->GetOpFunc(DXIL::OpCode::Dot4, Type::getFloatTy(Ctx)); Value *dp4Args[] = { ConstantInt::get(Type::getInt32Ty(Ctx), static_cast<unsigned>(DXIL::OpCode::Dot4)), nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, }; // out SV_Position should only have StoreInst use. // Done by LegalizeDxilInputOutputs in ScalarReplAggregatesHLSL.cpp for (User *U : outPosition->users()) { StoreInst *ST = cast<StoreInst>(U); Value *posVal = ST->getValueOperand(); DXASSERT(posVal->getType()->isVectorTy(), "SV_Position must be a vector"); IRBuilder<> Builder(ST); // Put position to args. for (unsigned i = 0; i < 4; i++) dp4Args[i + 1] = Builder.CreateExtractElement(posVal, i); // For each clip plane. // clipDistance = dp4 position, clipPlane. auto argIt = Entry->getArgumentList().rbegin(); for (int clipIdx = numClipPlanes - 1; clipIdx >= 0; clipIdx--) { Constant *GV = VS.clipPlanes[clipIdx]; DXASSERT_NOMSG(GV->hasOneUse()); StoreInst *ST = cast<StoreInst>(GV->user_back()); Value *clipPlane = ST->getValueOperand(); ST->eraseFromParent(); Argument &arg = *(argIt++); // Put clipPlane to args. for (unsigned i = 0; i < 4; i++) dp4Args[i + 5] = Builder.CreateExtractElement(clipPlane, i); Value *clipDistance = Builder.CreateCall(dp4, dp4Args); Builder.CreateStore(clipDistance, &arg); } } }
namespace { Value *TranslateStreamAppend(CallInst *CI, unsigned ID, hlsl::OP *OP) { Function *DxilFunc = OP->GetOpFunc(OP::OpCode::EmitStream, CI->getType()); // TODO: generate an emit which has the data being emitted as its argument. // Value *data = CI->getArgOperand(HLOperandIndex::kStreamAppendDataOpIndex); Constant *opArg = OP->GetU32Const((unsigned)OP::OpCode::EmitStream); IRBuilder<> Builder(CI); Constant *streamID = OP->GetU8Const(ID); Value *args[] = {opArg, streamID}; return Builder.CreateCall(DxilFunc, args); } Value *TranslateStreamCut(CallInst *CI, unsigned ID, hlsl::OP *OP) { Function *DxilFunc = OP->GetOpFunc(OP::OpCode::CutStream, CI->getType()); // TODO: generate an emit which has the data being emitted as its argument. // Value *data = CI->getArgOperand(HLOperandIndex::kStreamAppendDataOpIndex); Constant *opArg = OP->GetU32Const((unsigned)OP::OpCode::CutStream); IRBuilder<> Builder(CI); Constant *streamID = OP->GetU8Const(ID); Value *args[] = {opArg, streamID}; return Builder.CreateCall(DxilFunc, args); } } // namespace
// Generate DXIL stream output operation. void HLSignatureLower::GenerateStreamOutputOperation(Value *streamVal, unsigned ID) { OP *hlslOP = HLM.GetOP(); for (auto U = streamVal->user_begin(); U != streamVal->user_end();) { Value *user = *(U++); // Should only be used by Append or RestartStrip. CallInst *CI = cast<CallInst>(user); HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); // Ignore user functions. if (group == HLOpcodeGroup::NotHL) continue; unsigned opcode = GetHLOpcode(CI); DXASSERT_LOCALVAR(group, group == HLOpcodeGroup::HLIntrinsic, "Must be HLIntrinsic here"); IntrinsicOp IOP = static_cast<IntrinsicOp>(opcode); switch (IOP) { case IntrinsicOp::MOP_Append: TranslateStreamAppend(CI, ID, hlslOP); break; case IntrinsicOp::MOP_RestartStrip: TranslateStreamCut(CI, ID, hlslOP); break; default: DXASSERT(0, "invalid operation on stream"); } CI->eraseFromParent(); } }
// Generate DXIL stream output operations. void HLSignatureLower::GenerateStreamOutputOperations() { DxilFunctionAnnotation *EntryAnnotation = HLM.GetFunctionAnnotation(Entry); DXASSERT(EntryAnnotation, "must find annotation for entry function"); for (Argument &arg : Entry->getArgumentList()) { if (HLModule::IsStreamOutputPtrType(arg.getType())) { unsigned streamID = 0; DxilParameterAnnotation &paramAnnotation = EntryAnnotation->GetParameterAnnotation(arg.getArgNo()); DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual(); switch (inputQual) { case DxilParamInputQual::OutStream0: streamID = 0; break; case DxilParamInputQual::OutStream1: streamID = 1; break; case DxilParamInputQual::OutStream2: streamID = 2; break; case DxilParamInputQual::OutStream3: default: DXASSERT(inputQual == DxilParamInputQual::OutStream3, "invalid input qual."); streamID = 3; break; } GenerateStreamOutputOperation(&arg, streamID); } } }
// Generate DXIL EmitIndices operation. void HLSignatureLower::GenerateEmitIndicesOperation(Value *indicesOutput) { OP *hlslOP = HLM.GetOP(); Function *DxilFunc = hlslOP->GetOpFunc( OP::OpCode::EmitIndices, Type::getVoidTy(indicesOutput->getContext())); Constant *opArg = hlslOP->GetU32Const((unsigned)OP::OpCode::EmitIndices); for (auto U = indicesOutput->user_begin(); U != indicesOutput->user_end();) { Value *user = *(U++); GetElementPtrInst *GEP = cast<GetElementPtrInst>(user); auto idx = GEP->idx_begin(); DXASSERT_LOCALVAR(idx, idx->get() == hlslOP->GetU32Const(0), "only support 0 offset for input pointer"); gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); // Skip first pointer idx which must be 0. GEPIt++; Value *primIdx = GEPIt.getOperand(); DXASSERT(++GEPIt == E, "invalid GEP here"); (void)E; auto GepUser = GEP->user_begin(); auto GepUserE = GEP->user_end(); for (; GepUser != GepUserE;) { auto GepUserIt = GepUser++; StoreInst *stInst = cast<StoreInst>(*GepUserIt); Value *stVal = stInst->getValueOperand(); VectorType *VT = cast<VectorType>(stVal->getType()); unsigned eleCount = VT->getNumElements(); IRBuilder<> Builder(stInst); Value *subVal0 = Builder.CreateExtractElement(stVal, hlslOP->GetU32Const(0)); Value *subVal1 = Builder.CreateExtractElement(stVal, hlslOP->GetU32Const(1)); Value *subVal2 = eleCount == 3 ?
          Builder.CreateExtractElement(stVal, hlslOP->GetU32Const(2)) :
          hlslOP->GetU32Const(0);
      Value *args[] = {opArg, primIdx, subVal0, subVal1, subVal2};
      Builder.CreateCall(DxilFunc, args);
      stInst->eraseFromParent();
    }
    GEP->eraseFromParent();
  }
}

// Generate DXIL EmitIndices operations.
void HLSignatureLower::GenerateEmitIndicesOperations() {
  DxilFunctionAnnotation *EntryAnnotation = HLM.GetFunctionAnnotation(Entry);
  DXASSERT(EntryAnnotation, "must find annotation for entry function");
  for (Argument &arg : Entry->getArgumentList()) {
    DxilParameterAnnotation &paramAnnotation =
        EntryAnnotation->GetParameterAnnotation(arg.getArgNo());
    DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual();
    if (inputQual == DxilParamInputQual::OutIndices) {
      GenerateEmitIndicesOperation(&arg);
    }
  }
}

// Generate DXIL GetMeshPayload operation.
void HLSignatureLower::GenerateGetMeshPayloadOperation() {
  DxilFunctionAnnotation *EntryAnnotation = HLM.GetFunctionAnnotation(Entry);
  DXASSERT(EntryAnnotation, "must find annotation for entry function");
  for (Argument &arg : Entry->getArgumentList()) {
    DxilParameterAnnotation &paramAnnotation =
        EntryAnnotation->GetParameterAnnotation(arg.getArgNo());
    DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual();
    if (inputQual == DxilParamInputQual::InPayload) {
      OP *hlslOP = HLM.GetOP();
      Function *DxilFunc =
          hlslOP->GetOpFunc(OP::OpCode::GetMeshPayload, arg.getType());
      Constant *opArg =
          hlslOP->GetU32Const((unsigned)OP::OpCode::GetMeshPayload);
      IRBuilder<> Builder(
          arg.getParent()->getEntryBlock().getFirstInsertionPt());
      Value *args[] = {opArg};
      Value *payload = Builder.CreateCall(DxilFunc, args);
      arg.replaceAllUsesWith(payload);
    }
  }
}

// Lower signatures.
void HLSignatureLower::Run() {
  DxilFunctionProps &props = HLM.GetDxilFunctionProps(Entry);
  if (props.IsGraphics()) {
    if (props.IsMS()) {
      GenerateEmitIndicesOperations();
      GenerateGetMeshPayloadOperation();
    }
    CreateDxilSignatures();
    // Allocate input output.
    AllocateDxilInputOutputs();
    GenerateDxilInputs();
    GenerateDxilOutputs();
    if (props.IsMS()) {
      GenerateDxilPrimOutputs();
    }
  } else if (props.IsCS() || props.IsNode()) {
    GenerateDxilComputeAndNodeCommonInputs();
  }
  if (props.IsDS() || props.IsHS())
    GenerateDxilPatchConstantLdSt();
  if (props.IsHS())
    GenerateDxilPatchConstantFunctionInputs();
  if (props.IsGS())
    GenerateStreamOutputOperations();
}
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMatrixLowerPass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLMatrixLowerPass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // HLMatrixLowerPass implementation. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLMatrixLowerPass.h" #include "HLMatrixSubscriptUseReplacer.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; using namespace hlsl::HLMatrixLower; namespace hlsl { namespace HLMatrixLower { Value *BuildVector(Type *EltTy, ArrayRef<llvm::Value *> elts, IRBuilder<> &Builder) { Value *Vec = UndefValue::get( VectorType::get(EltTy, static_cast<unsigned>(elts.size()))); for (unsigned i = 0; i < elts.size(); i++) Vec = Builder.CreateInsertElement(Vec, elts[i], i); return Vec; } } // namespace HLMatrixLower } // namespace hlsl namespace { // Creates and manages a set of temporary overloaded functions keyed on the // function type, and which should be destroyed when the pool gets out of scope. class TempOverloadPool { public: TempOverloadPool(llvm::Module &Module, const char *BaseName) : Module(Module), BaseName(BaseName) {} ~TempOverloadPool() { if (!Funcs.empty()) { // The flow has thrown an exception. Let that exception // propagate out and be reported as a compile error. } } Function *get(FunctionType *Ty); bool contains(FunctionType *Ty) const { return Funcs.count(Ty) != 0; } bool contains(Function *Func) const; void clear(); private: llvm::Module &Module; const char *BaseName; llvm::DenseMap<FunctionType *, Function *> Funcs; }; Function *TempOverloadPool::get(FunctionType *Ty) { auto It = Funcs.find(Ty); if (It != Funcs.end()) return It->second; std::string MangledName; raw_string_ostream MangledNameStream(MangledName); MangledNameStream << BaseName; MangledNameStream << '.'; Ty->print(MangledNameStream); MangledNameStream.flush(); Function *Func = cast<Function>(Module.getOrInsertFunction(MangledName, Ty)); Funcs.insert(std::make_pair(Ty, Func)); return Func; } bool TempOverloadPool::contains(Function *Func) const { auto It = Funcs.find(Func->getFunctionType()); return It != Funcs.end() && It->second == Func; } void TempOverloadPool::clear() { for (auto Entry : Funcs) { DXASSERT(Entry.second->use_empty(), "Temporary function still used during pool destruction."); Entry.second->eraseFromParent(); } Funcs.clear(); } // High-level matrix lowering pass. // // This pass converts matrices to their lowered vector representations, // including global variables, local variables and operations, // but not function signatures (arguments and return types) - left to // HLSignatureLower and HLMatrixBitcastLower, nor matrices obtained from // resources or constant - left to HLOperationLower. 
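// For example (illustrative, not from the original source), a local
//   float2x2 m;
// lowers from
//   %m = alloca %class.matrix.float.2.2
// to
//   %m = alloca <4 x float>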
// // Algorithm overview: // 1. Find all matrix and matrix array global variables and lower them to // vectors. // Walk any GEPs and insert vec-to-mat translation stubs so that consuming // instructions keep dealing with matrix types for the moment. // 2. For each function // 2a. Lower all matrix and matrix array allocas, just like global variables. // 2b. Lower all other instructions producing or consuming matrices // // Conversion stubs are used to allow converting instructions in isolation, // and in an order-independent manner: // // Initial: MatInst1(MatInst2(MatInst3)) // After lowering MatInst2: MatInst1(VecToMat(VecInst2(MatToVec(MatInst3)))) // After lowering MatInst1: VecInst1(VecInst2(MatToVec(MatInst3))) // After lowering MatInst3: VecInst1(VecInst2(VecInst3)) class HLMatrixLowerPass : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit HLMatrixLowerPass() : ModulePass(ID) {} StringRef getPassName() const override { return "HL matrix lower"; } bool runOnModule(Module &M) override; private: void runOnFunction(Function &Func); void addToDeadInsts(Instruction *Inst) { m_deadInsts.emplace_back(Inst); } void deleteDeadInsts(); void getMatrixAllocasAndOtherInsts(Function &Func, std::vector<AllocaInst *> &MatAllocas, std::vector<Instruction *> &MatInsts); Value *getLoweredByValOperand(Value *Val, IRBuilder<> &Builder, bool DiscardStub = false); Value *tryGetLoweredPtrOperand(Value *Ptr, IRBuilder<> &Builder, bool DiscardStub = false); Value *bitCastValue(Value *SrcVal, Type *DstTy, bool DstTyAlloca, IRBuilder<> &Builder); void replaceAllUsesByLoweredValue(Instruction *MatInst, Value *VecVal); void replaceAllVariableUses(Value *MatPtr, Value *LoweredPtr); void replaceAllVariableUses(SmallVectorImpl<Value *> &GEPIdxStack, Value *StackTopPtr, Value *LoweredPtr); Value *translateScalarMatMul(Value *scalar, Value *mat, IRBuilder<> &Builder, bool isLhsScalar = true); void lowerGlobal(GlobalVariable *Global); Constant *lowerConstInitVal(Constant *Val); AllocaInst *lowerAlloca(AllocaInst *MatAlloca); void lowerInstruction(Instruction *Inst); void lowerReturn(ReturnInst *Return); Value *lowerCall(CallInst *Call); Value *lowerNonHLCall(CallInst *Call); void lowerPreciseCall(CallInst *Call, IRBuilder<> Builder); Value *lowerHLOperation(CallInst *Call, HLOpcodeGroup OpcodeGroup); Value *lowerHLIntrinsic(CallInst *Call, IntrinsicOp Opcode); Value *lowerHLMulIntrinsic(Value *Lhs, Value *Rhs, bool Unsigned, IRBuilder<> &Builder); Value *lowerHLTransposeIntrinsic(Value *MatVal, IRBuilder<> &Builder); Value *lowerHLDeterminantIntrinsic(Value *MatVal, IRBuilder<> &Builder); Value *lowerHLUnaryOperation(Value *MatVal, HLUnaryOpcode Opcode, IRBuilder<> &Builder); Value *lowerHLBinaryOperation(Value *Lhs, Value *Rhs, HLBinaryOpcode Opcode, IRBuilder<> &Builder); Value *lowerHLLoadStore(CallInst *Call, HLMatLoadStoreOpcode Opcode); Value *lowerHLLoad(CallInst *Call, Value *MatPtr, bool RowMajor, IRBuilder<> &Builder); Value *lowerHLStore(CallInst *Call, Value *MatVal, Value *MatPtr, bool RowMajor, bool Return, IRBuilder<> &Builder); Value *lowerHLCast(CallInst *Call, Value *Src, Type *DstTy, HLCastOpcode Opcode, IRBuilder<> &Builder); Value *lowerHLSubscript(CallInst *Call, HLSubscriptOpcode Opcode); Value *lowerHLMatElementSubscript(CallInst *Call, bool RowMajor); Value *lowerHLMatSubscript(CallInst *Call, bool RowMajor); void lowerHLMatSubscript(CallInst *Call, Value *MatPtr, SmallVectorImpl<Value *> &ElemIndices); Value *lowerHLInit(CallInst *Call); Value 
*lowerHLSelect(CallInst *Call); private: Module *m_pModule; HLModule *m_pHLModule; bool m_HasDbgInfo; // Pools for the translation stubs TempOverloadPool *m_matToVecStubs = nullptr; TempOverloadPool *m_vecToMatStubs = nullptr; std::vector<Instruction *> m_deadInsts; }; } // namespace char HLMatrixLowerPass::ID = 0; ModulePass *llvm::createHLMatrixLowerPass() { return new HLMatrixLowerPass(); } INITIALIZE_PASS(HLMatrixLowerPass, "hlmatrixlower", "HLSL High-Level Matrix Lower", false, false) bool HLMatrixLowerPass::runOnModule(Module &M) { TempOverloadPool matToVecStubs(M, "hlmatrixlower.mat2vec"); TempOverloadPool vecToMatStubs(M, "hlmatrixlower.vec2mat"); m_pModule = &M; m_pHLModule = &m_pModule->GetOrCreateHLModule(); // Load up debug information, to cross-reference values and the instructions // used to load them. m_HasDbgInfo = hasDebugInfo(M); m_matToVecStubs = &matToVecStubs; m_vecToMatStubs = &vecToMatStubs; // First, lower static global variables. // We need to accumulate them locally because we'll be creating new ones as we // lower them. std::vector<GlobalVariable *> Globals; for (GlobalVariable &Global : M.globals()) { if ((dxilutil::IsStaticGlobal(&Global) || dxilutil::IsSharedMemoryGlobal(&Global)) && HLMatrixType::isMatrixPtrOrArrayPtr(Global.getType())) { Globals.emplace_back(&Global); } } for (GlobalVariable *Global : Globals) lowerGlobal(Global); for (Function &F : M.functions()) { if (F.isDeclaration()) continue; runOnFunction(F); } m_pModule = nullptr; m_pHLModule = nullptr; m_matToVecStubs = nullptr; m_vecToMatStubs = nullptr; // If you hit an assert while clearing TempOverloadPool, // it means that either a matrix producer was lowered, // causing a translation stub to be created, // but the consumer of that matrix was never (properly) lowered. // Or the opposite: a matrix consumer was lowered and not its producer. matToVecStubs.clear(); vecToMatStubs.clear(); return true; } void HLMatrixLowerPass::runOnFunction(Function &Func) { // Skip hl function definition (like createhandle) if (hlsl::GetHLOpcodeGroupByName(&Func) != HLOpcodeGroup::NotHL) return; // Save the matrix instructions first since the translation process // will temporarily create other instructions consuming/producing matrix // types. std::vector<AllocaInst *> MatAllocas; std::vector<Instruction *> MatInsts; getMatrixAllocasAndOtherInsts(Func, MatAllocas, MatInsts); // First lower all allocas and take care of their GEP chains for (AllocaInst *MatAlloca : MatAllocas) { AllocaInst *LoweredAlloca = lowerAlloca(MatAlloca); replaceAllVariableUses(MatAlloca, LoweredAlloca); addToDeadInsts(MatAlloca); } // Now lower all other matrix instructions for (Instruction *MatInst : MatInsts) lowerInstruction(MatInst); deleteDeadInsts(); } void HLMatrixLowerPass::deleteDeadInsts() { while (!m_deadInsts.empty()) { Instruction *Inst = m_deadInsts.back(); m_deadInsts.pop_back(); DXASSERT_NOMSG(Inst->use_empty()); for (Value *Operand : Inst->operand_values()) { Instruction *OperandInst = dyn_cast<Instruction>(Operand); if (OperandInst && ++OperandInst->user_begin() == OperandInst->user_end()) { // We were its only user, erase recursively. // This will get rid of translation stubs: // Original: MatConsumer(MatProducer) // Producer lowered: MatConsumer(VecToMat(VecProducer)), MatProducer // dead Consumer lowered: VecConsumer(VecProducer)), // MatConsumer(VecToMat) dead Only by recursing on MatConsumer's operand // do we delete the VecToMat stub. 
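          // Illustrative trace: erasing a dead MatConsumer drops the last
          // use of its VecToMat stub call; the single-user check above then
          // queues the stub itself, whose erasure cannot queue VecProducer
          // as long as VecConsumer still uses it.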
DXASSERT_NOMSG(*OperandInst->user_begin() == Inst); m_deadInsts.emplace_back(OperandInst); } } Inst->eraseFromParent(); } } // Find all instructions consuming or producing matrices, // directly or through pointers/arrays. void HLMatrixLowerPass::getMatrixAllocasAndOtherInsts( Function &Func, std::vector<AllocaInst *> &MatAllocas, std::vector<Instruction *> &MatInsts) { for (BasicBlock &BasicBlock : Func) { for (Instruction &Inst : BasicBlock) { // Don't lower GEPs directly, we'll handle them as we lower the root // pointer, typically a global variable or alloca. if (isa<GetElementPtrInst>(&Inst)) continue; // Don't lower lifetime intrinsics here, we'll handle them as we lower the // alloca. IntrinsicInst *Intrin = dyn_cast<IntrinsicInst>(&Inst); if (Intrin && Intrin->getIntrinsicID() == Intrinsic::lifetime_start) continue; if (Intrin && Intrin->getIntrinsicID() == Intrinsic::lifetime_end) continue; if (AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst)) { if (HLMatrixType::isMatrixOrPtrOrArrayPtr(Alloca->getType())) { MatAllocas.emplace_back(Alloca); } continue; } if (CallInst *Call = dyn_cast<CallInst>(&Inst)) { // Lowering of global variables will have introduced // vec-to-mat translation stubs, which we deal with indirectly, // as we lower the instructions consuming them. if (m_vecToMatStubs->contains(Call->getCalledFunction())) continue; // Mat-to-vec stubs should only be introduced during instruction // lowering. Globals lowering won't introduce any because their only // operand is their initializer, which we can fully lower without // stubbing since it is constant. DXASSERT(!m_matToVecStubs->contains(Call->getCalledFunction()), "Unexpected mat-to-vec stubbing before function instruction " "lowering."); // Match matrix producers if (HLMatrixType::isMatrixOrPtrOrArrayPtr(Inst.getType())) { MatInsts.emplace_back(Call); continue; } // Match matrix consumers for (Value *Operand : Inst.operand_values()) { if (HLMatrixType::isMatrixOrPtrOrArrayPtr(Operand->getType())) { MatInsts.emplace_back(Call); break; } } continue; } if (ReturnInst *Return = dyn_cast<ReturnInst>(&Inst)) { Value *ReturnValue = Return->getReturnValue(); if (ReturnValue != nullptr && HLMatrixType::isMatrixOrPtrOrArrayPtr(ReturnValue->getType())) MatInsts.emplace_back(Return); continue; } // Nothing else should produce or consume matrices } } } // Gets the matrix-lowered representation of a value, potentially adding a // translation stub. DiscardStub causes any vec-to-mat translation stubs to be // deleted, it should be true only if the original instruction will be modified // and kept alive. If a new instruction is created and the original marked as // dead, then the remove dead instructions pass will take care of removing the // stub. Value *HLMatrixLowerPass::getLoweredByValOperand(Value *Val, IRBuilder<> &Builder, bool DiscardStub) { Type *Ty = Val->getType(); // We're only lowering byval matrices. // Since structs and arrays are always accessed by pointer, // we do not need to worry about a matrix being hidden inside a more complex // type. 
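// Illustrative sketch (not from the original source): if only the producer
// has been lowered so far, the value arrives wrapped in a stub and is simply
// unwrapped here:
//   %vec = <lowered producer>                                  ; <4 x float>
//   %mat = call %class.matrix.float.2.2 @"hlmatrixlower.vec2mat.<sig>"(%vec)
//   getLoweredByValOperand(%mat) -> %vec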
DXASSERT(!Ty->isPointerTy(), "Value cannot be a pointer."); HLMatrixType MatTy = HLMatrixType::dyn_cast(Ty); if (!MatTy) return Val; Type *LoweredTy = MatTy.getLoweredVectorTypeForReg(); // Check if the value is already a vec-to-mat translation stub if (CallInst *Call = dyn_cast<CallInst>(Val)) { if (m_vecToMatStubs->contains(Call->getCalledFunction())) { if (DiscardStub && Call->getNumUses() == 1) { Call->use_begin()->set(UndefValue::get(Call->getType())); addToDeadInsts(Call); } Value *LoweredVal = Call->getArgOperand(0); DXASSERT(LoweredVal->getType() == LoweredTy, "Unexpected already-lowered value type."); return LoweredVal; } } // Lower mat 0 to vec 0. if (isa<ConstantAggregateZero>(Val)) return ConstantAggregateZero::get(LoweredTy); // Lower undef mat as undef vec if (isa<UndefValue>(Val)) return UndefValue::get(LoweredTy); // Return a mat-to-vec translation stub FunctionType *TranslationStubTy = FunctionType::get(LoweredTy, {Ty}, /* isVarArg */ false); Function *TranslationStub = m_matToVecStubs->get(TranslationStubTy); return Builder.CreateCall(TranslationStub, {Val}); } // Attempts to retrieve the lowered vector pointer equivalent to a matrix // pointer. Returns nullptr if the pointed-to matrix lives in memory that cannot // be lowered at this time, for example a buffer or shader inputs/outputs, which // are lowered during signature lowering. Value *HLMatrixLowerPass::tryGetLoweredPtrOperand(Value *Ptr, IRBuilder<> &Builder, bool DiscardStub) { if (!HLMatrixType::isMatrixPtrOrArrayPtr(Ptr->getType())) return nullptr; // Matrix pointers can only be derived from Allocas, GlobalVariables or // resource accesses. The first two cases are what this pass must be able to // lower, and we should already have replaced their uses by vector to matrix // pointer translation stubs. if (CallInst *Call = dyn_cast<CallInst>(Ptr)) { if (m_vecToMatStubs->contains(Call->getCalledFunction())) { if (DiscardStub && Call->getNumUses() == 1) { Call->use_begin()->set(UndefValue::get(Call->getType())); addToDeadInsts(Call); } return Call->getArgOperand(0); } } // There's one more case to handle. // When compiling shader libraries, signatures won't have been lowered yet. // So we can have a matrix in a struct as an argument, // or an alloca'd struct holding the return value of a call and containing a // matrix. Value *RootPtr = Ptr; while (GEPOperator *GEP = dyn_cast<GEPOperator>(RootPtr)) RootPtr = GEP->getPointerOperand(); Argument *Arg = dyn_cast<Argument>(RootPtr); bool IsNonShaderArg = Arg != nullptr && !m_pHLModule->IsEntryThatUsesSignatures(Arg->getParent()); if (IsNonShaderArg || isa<AllocaInst>(RootPtr)) { // Bitcast the matrix pointer to its lowered equivalent. // The HLMatrixBitcast pass will take care of this later. return Builder.CreateBitCast(Ptr, HLMatrixType::getLoweredType(Ptr->getType())); } // The pointer must be derived from a resource, we don't handle it in this // pass. return nullptr; } // Bitcasts a value from matrix to vector or vice-versa. // This is used to convert to/from arguments/return values since we don't // lower signatures in this pass. The later HLMatrixBitcastLower pass fixes // this. Value *HLMatrixLowerPass::bitCastValue(Value *SrcVal, Type *DstTy, bool DstTyAlloca, IRBuilder<> &Builder) { Type *SrcTy = SrcVal->getType(); DXASSERT_NOMSG(!SrcTy->isPointerTy()); // We store and load from a temporary alloca, bitcasting either on the store // pointer or on the load pointer. 
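// Illustrative shapes (sketch, with SrcTy = <4 x float> and
// DstTy = %class.matrix.float.2.2):
//   DstTyAlloca == true:                DstTyAlloca == false:
//     %a  = alloca DstTy                  %a  = alloca SrcTy
//     %bc = bitcast %a to SrcTy*          %bc = bitcast %a to DstTy*
//     store SrcVal, %bc                   store SrcVal, %a
//     %r  = load %a                       %r  = load %bc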
IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); Value *Alloca = AllocaBuilder.CreateAlloca(DstTyAlloca ? DstTy : SrcTy); Value *BitCastedAlloca = Builder.CreateBitCast( Alloca, (DstTyAlloca ? SrcTy : DstTy)->getPointerTo()); Builder.CreateStore(SrcVal, DstTyAlloca ? BitCastedAlloca : Alloca); return Builder.CreateLoad(DstTyAlloca ? Alloca : BitCastedAlloca); } // Replaces all uses of a matrix value by its lowered vector form, // inserting translation stubs for users which still expect a matrix value. void HLMatrixLowerPass::replaceAllUsesByLoweredValue(Instruction *MatInst, Value *VecVal) { if (VecVal == nullptr || VecVal == MatInst) return; DXASSERT(HLMatrixType::getLoweredType(MatInst->getType()) == VecVal->getType(), "Unexpected lowered value type."); Instruction *VecToMatStub = nullptr; while (!MatInst->use_empty()) { Use &ValUse = *MatInst->use_begin(); // Handle non-matrix cases, just point to the new value. if (MatInst->getType() == VecVal->getType()) { ValUse.set(VecVal); continue; } // If the user is already a matrix-to-vector translation stub, // we can now replace it by the proper vector value. if (CallInst *Call = dyn_cast<CallInst>(ValUse.getUser())) { if (m_matToVecStubs->contains(Call->getCalledFunction())) { Call->replaceAllUsesWith(VecVal); ValUse.set(UndefValue::get(MatInst->getType())); addToDeadInsts(Call); continue; } } // Otherwise, the user should point to a vector-to-matrix translation // stub of the new vector value. if (VecToMatStub == nullptr) { FunctionType *TranslationStubTy = FunctionType::get( MatInst->getType(), {VecVal->getType()}, /* isVarArg */ false); Function *TranslationStub = m_vecToMatStubs->get(TranslationStubTy); Instruction *PrevInst = dyn_cast<Instruction>(VecVal); if (PrevInst == nullptr) PrevInst = MatInst; IRBuilder<> Builder(PrevInst->getNextNode()); VecToMatStub = Builder.CreateCall(TranslationStub, {VecVal}); } ValUse.set(VecToMatStub); } } // Replaces all uses of a matrix or matrix array alloca or global variable by // its lowered equivalent. This doesn't lower the users, but will insert a // translation stub from the lowered value pointer back to the matrix value // pointer, and recreate any GEPs around the new pointer. 
Before: // User(GEP(MatrixArrayAlloca)) After: // User(VecToMatPtrStub(GEP'(VectorArrayAlloca))) void HLMatrixLowerPass::replaceAllVariableUses(Value *MatPtr, Value *LoweredPtr) { DXASSERT_NOMSG(HLMatrixType::isMatrixPtrOrArrayPtr(MatPtr->getType())); DXASSERT_NOMSG(LoweredPtr->getType() == HLMatrixType::getLoweredType(MatPtr->getType())); SmallVector<Value *, 4> GEPIdxStack; GEPIdxStack.emplace_back( ConstantInt::get(Type::getInt32Ty(MatPtr->getContext()), 0)); replaceAllVariableUses(GEPIdxStack, MatPtr, LoweredPtr); } void HLMatrixLowerPass::replaceAllVariableUses( SmallVectorImpl<Value *> &GEPIdxStack, Value *StackTopPtr, Value *LoweredPtr) { while (!StackTopPtr->use_empty()) { llvm::Use &Use = *StackTopPtr->use_begin(); if (GEPOperator *GEP = dyn_cast<GEPOperator>(Use.getUser())) { DXASSERT(GEP->getNumIndices() >= 1, "Unexpected degenerate GEP."); DXASSERT(cast<ConstantInt>(*GEP->idx_begin())->isZero(), "Unexpected non-zero first GEP index."); // Recurse in GEP to find actual users for (auto It = GEP->idx_begin() + 1; It != GEP->idx_end(); ++It) GEPIdxStack.emplace_back(*It); replaceAllVariableUses(GEPIdxStack, GEP, LoweredPtr); GEPIdxStack.erase(GEPIdxStack.end() - (GEP->getNumIndices() - 1), GEPIdxStack.end()); // Discard the GEP DXASSERT_NOMSG(GEP->use_empty()); if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP)) { Use.set(UndefValue::get(Use->getType())); addToDeadInsts(GEPInst); } else { // constant GEP cast<Constant>(GEP)->destroyConstant(); } continue; } if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Use.getUser())) { DXASSERT(CE->getOpcode() == Instruction::AddrSpaceCast || CE->use_empty(), "Unexpected constant user"); replaceAllVariableUses(GEPIdxStack, CE, LoweredPtr); DXASSERT_NOMSG(CE->use_empty()); CE->destroyConstant(); continue; } if (AddrSpaceCastInst *CI = dyn_cast<AddrSpaceCastInst>(Use.getUser())) { replaceAllVariableUses(GEPIdxStack, CI, LoweredPtr); Use.set(UndefValue::get(Use->getType())); addToDeadInsts(CI); continue; } if (BitCastInst *BCI = dyn_cast<BitCastInst>(Use.getUser())) { // Replace bitcasts to i8* for lifetime intrinsics. if (BCI->getType()->isPointerTy() && BCI->getType()->getPointerElementType()->isIntegerTy(8)) { DXASSERT(onlyUsedByLifetimeMarkers(BCI), "bitcast to i8* must only be used by lifetime intrinsics"); Value *NewBCI = IRBuilder<>(BCI).CreateBitCast(LoweredPtr, BCI->getType()); // Replace all uses of the use. BCI->replaceAllUsesWith(NewBCI); // Remove the current use to end iteration. Use.set(UndefValue::get(Use->getType())); addToDeadInsts(BCI); continue; } } // Recreate the same GEP sequence, if any, on the lowered pointer IRBuilder<> Builder(cast<Instruction>(Use.getUser())); Value *LoweredStackTopPtr = GEPIdxStack.size() == 1 ? LoweredPtr : Builder.CreateGEP(LoweredPtr, GEPIdxStack); // Generate a stub translating the vector pointer back to a matrix pointer, // such that consuming instructions are unaffected. FunctionType *TranslationStubTy = FunctionType::get( StackTopPtr->getType(), {LoweredStackTopPtr->getType()}, /* isVarArg */ false); Function *TranslationStub = m_vecToMatStubs->get(TranslationStubTy); Use.set(Builder.CreateCall(TranslationStub, {LoweredStackTopPtr})); } } void HLMatrixLowerPass::lowerGlobal(GlobalVariable *Global) { if (Global->user_empty()) return; PointerType *LoweredPtrTy = cast<PointerType>(HLMatrixType::getLoweredType(Global->getType())); DXASSERT_NOMSG(LoweredPtrTy != Global->getType()); Constant *LoweredInitVal = Global->hasInitializer() ? 
lowerConstInitVal(Global->getInitializer()) : nullptr; GlobalVariable *LoweredGlobal = new GlobalVariable( *m_pModule, LoweredPtrTy->getElementType(), Global->isConstant(), Global->getLinkage(), LoweredInitVal, Global->getName() + ".v", /*InsertBefore*/ nullptr, Global->getThreadLocalMode(), Global->getType()->getAddressSpace()); // Calculate preferred alignment for the new global const llvm::DataLayout &DL = m_pModule->getDataLayout(); LoweredGlobal->setAlignment(DL.getPreferredAlignment(LoweredGlobal)); // Add debug info. if (m_HasDbgInfo) { DebugInfoFinder &Finder = m_pHLModule->GetOrCreateDebugInfoFinder(); HLModule::UpdateGlobalVariableDebugInfo(Global, Finder, LoweredGlobal); } replaceAllVariableUses(Global, LoweredGlobal); Global->removeDeadConstantUsers(); Global->eraseFromParent(); } Constant *HLMatrixLowerPass::lowerConstInitVal(Constant *Val) { Type *Ty = Val->getType(); // If it's an array of matrices, recurse for each element or nested array if (ArrayType *ArrayTy = dyn_cast<ArrayType>(Ty)) { SmallVector<Constant *, 4> LoweredElems; unsigned NumElems = ArrayTy->getNumElements(); LoweredElems.reserve(NumElems); for (unsigned ElemIdx = 0; ElemIdx < NumElems; ++ElemIdx) { Constant *ArrayElem = Val->getAggregateElement(ElemIdx); LoweredElems.emplace_back(lowerConstInitVal(ArrayElem)); } Type *LoweredElemTy = HLMatrixType::getLoweredType( ArrayTy->getElementType(), /*MemRepr*/ true); ArrayType *LoweredArrayTy = ArrayType::get(LoweredElemTy, NumElems); return ConstantArray::get(LoweredArrayTy, LoweredElems); } // Otherwise it's a matrix, lower it to a vector HLMatrixType MatTy = HLMatrixType::cast(Ty); DXASSERT_NOMSG(isa<StructType>(Ty)); Constant *RowArrayVal = Val->getAggregateElement((unsigned)0); // Original initializer should have been produced in row/column-major order // depending on the qualifiers of the target variable, so preserve the order. SmallVector<Constant *, 16> MatElems; for (unsigned RowIdx = 0; RowIdx < MatTy.getNumRows(); ++RowIdx) { Constant *RowVal = RowArrayVal->getAggregateElement(RowIdx); for (unsigned ColIdx = 0; ColIdx < MatTy.getNumColumns(); ++ColIdx) { MatElems.emplace_back(RowVal->getAggregateElement(ColIdx)); } } Constant *Vec = ConstantVector::get(MatElems); // Matrix elements are always in register representation, // but the lowered global variable is of vector type in // its memory representation, so we must convert here. // This will produce a constant so we can use an IRBuilder without a valid // insertion point. IRBuilder<> DummyBuilder(Val->getContext()); return cast<Constant>(MatTy.emitLoweredRegToMem(Vec, DummyBuilder)); } AllocaInst *HLMatrixLowerPass::lowerAlloca(AllocaInst *MatAlloca) { PointerType *LoweredAllocaTy = cast<PointerType>(HLMatrixType::getLoweredType(MatAlloca->getType())); IRBuilder<> Builder(MatAlloca); AllocaInst *LoweredAlloca = Builder.CreateAlloca( LoweredAllocaTy->getElementType(), nullptr, MatAlloca->getName()); // Update debug info. 
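  // Illustrative sketch (assumed debug-info shape): the llvm.dbg.declare
  // that pointed at the matrix alloca is re-issued against the lowered one:
  //   call void @llvm.dbg.declare(metadata <4 x float>* %m,
  //                               metadata !var, metadata !expr)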
if (DbgDeclareInst *DbgDeclare = llvm::FindAllocaDbgDeclare(MatAlloca)) { DILocalVariable *DbgDeclareVar = DbgDeclare->getVariable(); DIExpression *DbgDeclareExpr = DbgDeclare->getExpression(); DIBuilder DIB(*MatAlloca->getModule()); DIB.insertDeclare(LoweredAlloca, DbgDeclareVar, DbgDeclareExpr, DbgDeclare->getDebugLoc(), DbgDeclare); } if (HLModule::HasPreciseAttributeWithMetadata(MatAlloca)) HLModule::MarkPreciseAttributeWithMetadata(LoweredAlloca); replaceAllVariableUses(MatAlloca, LoweredAlloca); return LoweredAlloca; } void HLMatrixLowerPass::lowerInstruction(Instruction *Inst) { if (CallInst *Call = dyn_cast<CallInst>(Inst)) { Value *LoweredValue = lowerCall(Call); // lowerCall returns the lowered value iff we should discard // the original matrix instruction and replace all of its uses // by the lowered value. It returns nullptr to opt-out of this. if (LoweredValue != nullptr) { replaceAllUsesByLoweredValue(Call, LoweredValue); addToDeadInsts(Inst); } } else if (ReturnInst *Return = dyn_cast<ReturnInst>(Inst)) { lowerReturn(Return); } else llvm_unreachable("Unexpected matrix instruction type."); } void HLMatrixLowerPass::lowerReturn(ReturnInst *Return) { Value *RetVal = Return->getReturnValue(); Type *RetTy = RetVal->getType(); DXASSERT_LOCALVAR(RetTy, !RetTy->isPointerTy(), "Unexpected matrix returned by pointer."); IRBuilder<> Builder(Return); Value *LoweredRetVal = getLoweredByValOperand(RetVal, Builder, /* DiscardStub */ true); // Since we're not lowering the signature, we can't return the lowered value // directly, so insert a bitcast, which HLMatrixBitcastLower knows how to // eliminate. Value *BitCastedRetVal = bitCastValue(LoweredRetVal, RetVal->getType(), /* DstTyAlloca */ false, Builder); Return->setOperand(0, BitCastedRetVal); } Value *HLMatrixLowerPass::lowerCall(CallInst *Call) { HLOpcodeGroup OpcodeGroup = GetHLOpcodeGroupByName(Call->getCalledFunction()); return OpcodeGroup == HLOpcodeGroup::NotHL ? lowerNonHLCall(Call) : lowerHLOperation(Call, OpcodeGroup); } // Special function to lower precise call applied to a matrix // The matrix should be lowered and the call regenerated with vector arg void HLMatrixLowerPass::lowerPreciseCall(CallInst *Call, IRBuilder<> Builder) { DXASSERT(Call->getNumArgOperands() == 1, "Only one arg expected for precise matrix call"); Value *Arg = Call->getArgOperand(0); Value *LoweredArg = getLoweredByValOperand(Arg, Builder); HLModule::MarkPreciseAttributeOnValWithFunctionCall(LoweredArg, Builder, *m_pModule); addToDeadInsts(Call); } Value *HLMatrixLowerPass::lowerNonHLCall(CallInst *Call) { // First, handle any operand of matrix-derived type // We don't lower the callee's signature in this pass, // so, for any matrix-typed parameter, we create a bitcast from the // lowered vector back to the matrix type, which the later // HLMatrixBitcastLower pass knows how to eliminate. IRBuilder<> PreCallBuilder(Call); unsigned NumArgs = Call->getNumArgOperands(); Function *Func = Call->getCalledFunction(); if (Func && HLModule::HasPreciseAttribute(Func)) { lowerPreciseCall(Call, PreCallBuilder); return nullptr; } for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) { Use &ArgUse = Call->getArgOperandUse(ArgIdx); if (ArgUse->getType()->isPointerTy()) { // Byref arg Value *LoweredArg = tryGetLoweredPtrOperand(ArgUse.get(), PreCallBuilder, /* DiscardStub */ true); if (LoweredArg != nullptr) { // Pointer to a matrix we've lowered, insert a bitcast back to matrix // pointer type. 
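        // Illustrative (hypothetical callee f): for `void f(inout float2x2 m)`,
        // the lowered storage is handed back under its original pointer type:
        //   %bc = bitcast <4 x float>* %m.lowered to %class.matrix.float.2.2*
        //   call void @f(%class.matrix.float.2.2* %bc)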
Value *BitCastedArg = PreCallBuilder.CreateBitCast(LoweredArg, ArgUse->getType()); ArgUse.set(BitCastedArg); } } else { // Byvalue arg Value *LoweredArg = getLoweredByValOperand(ArgUse.get(), PreCallBuilder, /* DiscardStub */ true); if (LoweredArg == ArgUse.get()) continue; Value *BitCastedArg = bitCastValue(LoweredArg, ArgUse->getType(), /* DstTyAlloca */ false, PreCallBuilder); ArgUse.set(BitCastedArg); } } // Now check the return type HLMatrixType RetMatTy = HLMatrixType::dyn_cast(Call->getType()); if (!RetMatTy) { DXASSERT(!HLMatrixType::isMatrixPtrOrArrayPtr(Call->getType()), "Unexpected user call returning a matrix by pointer."); // Nothing to replace, other instructions can consume a non-matrix return // type. return nullptr; } // The callee returns a matrix, and we don't lower signatures in this pass. // We perform a sketchy bitcast to the lowered register-representation type, // which the later HLMatrixBitcastLower pass knows how to eliminate. IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(Call)); Value *LoweredAlloca = AllocaBuilder.CreateAlloca(RetMatTy.getLoweredVectorTypeForReg()); IRBuilder<> PostCallBuilder(Call->getNextNode()); Value *BitCastedAlloca = PostCallBuilder.CreateBitCast( LoweredAlloca, Call->getType()->getPointerTo()); // This is slightly tricky // We want to replace all uses of the matrix-returning call by the bitcasted // value, but the store to the bitcasted pointer itself is a use of that // matrix, so we need to create the load, replace the uses, and then insert // the store. LoadInst *LoweredVal = PostCallBuilder.CreateLoad(LoweredAlloca); replaceAllUsesByLoweredValue(Call, LoweredVal); // Now we can insert the store. Make sure to do so before the load. PostCallBuilder.SetInsertPoint(LoweredVal); PostCallBuilder.CreateStore(Call, BitCastedAlloca); // Return nullptr since we did our own uses replacement and we don't want // the matrix instruction to be marked as dead since we're still using it. 
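  // Final shape (illustrative, hypothetical callee userFn):
  //   %slot = alloca <4 x float>                              ; entry block
  //   %mat  = call %class.matrix.float.2.2 @userFn(...)
  //   %bc   = bitcast <4 x float>* %slot to %class.matrix.float.2.2*
  //   store %class.matrix.float.2.2 %mat, %class.matrix.float.2.2* %bc
  //   %vec  = load <4 x float>, <4 x float>* %slot ; replaces %mat's other uses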
return nullptr; } Value *HLMatrixLowerPass::lowerHLOperation(CallInst *Call, HLOpcodeGroup OpcodeGroup) { IRBuilder<> Builder(Call); switch (OpcodeGroup) { case HLOpcodeGroup::HLIntrinsic: return lowerHLIntrinsic(Call, static_cast<IntrinsicOp>(GetHLOpcode(Call))); case HLOpcodeGroup::HLBinOp: return lowerHLBinaryOperation( Call->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx), Call->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx), static_cast<HLBinaryOpcode>(GetHLOpcode(Call)), Builder); case HLOpcodeGroup::HLUnOp: return lowerHLUnaryOperation( Call->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx), static_cast<HLUnaryOpcode>(GetHLOpcode(Call)), Builder); case HLOpcodeGroup::HLMatLoadStore: return lowerHLLoadStore( Call, static_cast<HLMatLoadStoreOpcode>(GetHLOpcode(Call))); case HLOpcodeGroup::HLCast: return lowerHLCast( Call, Call->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx), Call->getType(), static_cast<HLCastOpcode>(GetHLOpcode(Call)), Builder); case HLOpcodeGroup::HLSubscript: return lowerHLSubscript(Call, static_cast<HLSubscriptOpcode>(GetHLOpcode(Call))); case HLOpcodeGroup::HLInit: return lowerHLInit(Call); case HLOpcodeGroup::HLSelect: return lowerHLSelect(Call); default: llvm_unreachable("Unexpected matrix opcode"); } } Value *HLMatrixLowerPass::lowerHLIntrinsic(CallInst *Call, IntrinsicOp Opcode) { IRBuilder<> Builder(Call); // See if this is a matrix-specific intrinsic which we should expand here switch (Opcode) { case IntrinsicOp::IOP_umul: case IntrinsicOp::IOP_mul: return lowerHLMulIntrinsic( Call->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx), Call->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx), /* Unsigned */ Opcode == IntrinsicOp::IOP_umul, Builder); case IntrinsicOp::IOP_transpose: return lowerHLTransposeIntrinsic( Call->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx), Builder); case IntrinsicOp::IOP_determinant: return lowerHLDeterminantIntrinsic( Call->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx), Builder); } // Delegate to a lowered intrinsic call SmallVector<Value *, 4> LoweredArgs; LoweredArgs.reserve(Call->getNumArgOperands()); for (Value *Arg : Call->arg_operands()) { if (Arg->getType()->isPointerTy()) { // ByRef parameter (for example, frexp's second parameter) // If the argument points to a lowered matrix variable, replace it here, // otherwise preserve the matrix type and let further passes handle the // lowering. Value *LoweredArg = tryGetLoweredPtrOperand(Arg, Builder); if (LoweredArg == nullptr) LoweredArg = Arg; LoweredArgs.emplace_back(LoweredArg); } else { LoweredArgs.emplace_back(getLoweredByValOperand(Arg, Builder)); } } Type *LoweredRetTy = HLMatrixType::getLoweredType(Call->getType()); return callHLFunction( *m_pModule, HLOpcodeGroup::HLIntrinsic, static_cast<unsigned>(Opcode), LoweredRetTy, LoweredArgs, Call->getCalledFunction()->getAttributes().getFnAttributes(), Builder); } // Handles multiplcation of a scalar with a matrix Value *HLMatrixLowerPass::translateScalarMatMul(Value *Lhs, Value *Rhs, IRBuilder<> &Builder, bool isLhsScalar) { Value *Mat = isLhsScalar ? Rhs : Lhs; Value *Scalar = isLhsScalar ? Lhs : Rhs; Value *LoweredMat = getLoweredByValOperand(Mat, Builder); Type *ScalarTy = Scalar->getType(); FixedVectorType *VT = dyn_cast<FixedVectorType>(LoweredMat->getType()); // Perform the scalar-matrix multiplication! 
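  // Illustrative: `2.0f * m` for a float2x2 (lowered m : <4 x float>):
  //   %splat = <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>
  //   %res   = fmul <4 x float> %splat, %m   ; scalar-on-the-left order kept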
Type *ElemTy = VT->getElementType(); bool isIntMulOp = ScalarTy->isIntegerTy() && ElemTy->isIntegerTy(); bool isFloatMulOp = ScalarTy->isFloatingPointTy() && ElemTy->isFloatingPointTy(); DXASSERT(ScalarTy == ElemTy, "Scalar type must match the matrix component type."); Value *Result = Builder.CreateVectorSplat(VT->getNumElements(), Scalar); if (isFloatMulOp) { // Preserve the order of operation for floats Result = isLhsScalar ? Builder.CreateFMul(Result, LoweredMat) : Builder.CreateFMul(LoweredMat, Result); } else if (isIntMulOp) { // Doesn't matter for integers but still preserve the order of operation Result = isLhsScalar ? Builder.CreateMul(Result, LoweredMat) : Builder.CreateMul(LoweredMat, Result); } else { DXASSERT( 0, "Unknown type encountered when doing scalar-matrix multiplication."); } return Result; } Value *HLMatrixLowerPass::lowerHLMulIntrinsic(Value *Lhs, Value *Rhs, bool Unsigned, IRBuilder<> &Builder) { HLMatrixType LhsMatTy = HLMatrixType::dyn_cast(Lhs->getType()); HLMatrixType RhsMatTy = HLMatrixType::dyn_cast(Rhs->getType()); Value *LoweredLhs = getLoweredByValOperand(Lhs, Builder); Value *LoweredRhs = getLoweredByValOperand(Rhs, Builder); // Translate multiplication of scalar with matrix bool isLhsScalar = !LoweredLhs->getType()->isVectorTy(); bool isRhsScalar = !LoweredRhs->getType()->isVectorTy(); bool isScalar = isLhsScalar || isRhsScalar; if (isScalar) return translateScalarMatMul(Lhs, Rhs, Builder, isLhsScalar); DXASSERT(LoweredLhs->getType()->getScalarType() == LoweredRhs->getType()->getScalarType(), "Unexpected element type mismatch in mul intrinsic."); DXASSERT(cast<VectorType>(LoweredLhs->getType()) && cast<VectorType>(LoweredRhs->getType()), "Unexpected scalar in lowered matrix mul intrinsic operands."); Type *ElemTy = LoweredLhs->getType()->getScalarType(); // Figure out the dimensions of each side unsigned LhsNumRows, LhsNumCols, RhsNumRows, RhsNumCols; if (LhsMatTy && RhsMatTy) { LhsNumRows = LhsMatTy.getNumRows(); LhsNumCols = LhsMatTy.getNumColumns(); RhsNumRows = RhsMatTy.getNumRows(); RhsNumCols = RhsMatTy.getNumColumns(); } else if (LhsMatTy) { LhsNumRows = LhsMatTy.getNumRows(); LhsNumCols = LhsMatTy.getNumColumns(); FixedVectorType *VT = dyn_cast<FixedVectorType>(LoweredRhs->getType()); RhsNumRows = VT->getNumElements(); RhsNumCols = 1; } else if (RhsMatTy) { LhsNumRows = 1; FixedVectorType *VT = dyn_cast<FixedVectorType>(LoweredLhs->getType()); LhsNumCols = VT->getNumElements(); RhsNumRows = RhsMatTy.getNumRows(); RhsNumCols = RhsMatTy.getNumColumns(); } else { llvm_unreachable("mul intrinsic was identified as a matrix operation but " "neither operand is a matrix."); } DXASSERT(LhsNumCols == RhsNumRows, "Matrix mul intrinsic operands dimensions mismatch."); HLMatrixType ResultMatTy(ElemTy, LhsNumRows, RhsNumCols); unsigned AccCount = LhsNumCols; // Get the multiply-and-add intrinsic function, we'll need it IntrinsicOp MadOpcode = Unsigned ? IntrinsicOp::IOP_umad : IntrinsicOp::IOP_mad; FunctionType *MadFuncTy = FunctionType::get( ElemTy, {Builder.getInt32Ty(), ElemTy, ElemTy, ElemTy}, false); Function *MadFunc = GetOrCreateHLFunction( *m_pModule, MadFuncTy, HLOpcodeGroup::HLIntrinsic, (unsigned)MadOpcode); Constant *MadOpcodeVal = Builder.getInt32((unsigned)MadOpcode); // Perform the multiplication! 
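  // Illustrative: float2x2 * float2x2 with row-major flattening,
  // element (r, c) stored at r * NumCols + c:
  //   res[0] = (0,0) = lhs(0,0)*rhs(0,0) + lhs(0,1)*rhs(1,0)
  //          = mad(lhs[1], rhs[2], fmul(lhs[0], rhs[0]))
  // The first product of each accumulation is a plain mul; every further
  // term folds in through a single IOP_mad call.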
Value *Result = UndefValue::get(VectorType::get(ElemTy, LhsNumRows * RhsNumCols)); for (unsigned ResultRowIdx = 0; ResultRowIdx < ResultMatTy.getNumRows(); ++ResultRowIdx) { for (unsigned ResultColIdx = 0; ResultColIdx < ResultMatTy.getNumColumns(); ++ResultColIdx) { unsigned ResultElemIdx = ResultMatTy.getRowMajorIndex(ResultRowIdx, ResultColIdx); Value *ResultElem = nullptr; for (unsigned AccIdx = 0; AccIdx < AccCount; ++AccIdx) { unsigned LhsElemIdx = HLMatrixType::getRowMajorIndex( ResultRowIdx, AccIdx, LhsNumRows, LhsNumCols); unsigned RhsElemIdx = HLMatrixType::getRowMajorIndex( AccIdx, ResultColIdx, RhsNumRows, RhsNumCols); Value *LhsElem = Builder.CreateExtractElement( LoweredLhs, static_cast<uint64_t>(LhsElemIdx)); Value *RhsElem = Builder.CreateExtractElement( LoweredRhs, static_cast<uint64_t>(RhsElemIdx)); if (ResultElem == nullptr) { ResultElem = ElemTy->isFloatingPointTy() ? Builder.CreateFMul(LhsElem, RhsElem) : Builder.CreateMul(LhsElem, RhsElem); } else { ResultElem = Builder.CreateCall( MadFunc, {MadOpcodeVal, LhsElem, RhsElem, ResultElem}); } } Result = Builder.CreateInsertElement( Result, ResultElem, static_cast<uint64_t>(ResultElemIdx)); } } return Result; } Value *HLMatrixLowerPass::lowerHLTransposeIntrinsic(Value *MatVal, IRBuilder<> &Builder) { HLMatrixType MatTy = HLMatrixType::cast(MatVal->getType()); Value *LoweredVal = getLoweredByValOperand(MatVal, Builder); return MatTy.emitLoweredVectorRowToCol(LoweredVal, Builder); } static Value *determinant2x2(Value *M00, Value *M01, Value *M10, Value *M11, IRBuilder<> &Builder) { Value *Mul0 = Builder.CreateFMul(M00, M11); Value *Mul1 = Builder.CreateFMul(M01, M10); return Builder.CreateFSub(Mul0, Mul1); } static Value *determinant3x3(Value *M00, Value *M01, Value *M02, Value *M10, Value *M11, Value *M12, Value *M20, Value *M21, Value *M22, IRBuilder<> &Builder) { Value *Det00 = determinant2x2(M11, M12, M21, M22, Builder); Value *Det01 = determinant2x2(M10, M12, M20, M22, Builder); Value *Det02 = determinant2x2(M10, M11, M20, M21, Builder); Det00 = Builder.CreateFMul(M00, Det00); Det01 = Builder.CreateFMul(M01, Det01); Det02 = Builder.CreateFMul(M02, Det02); Value *Result = Builder.CreateFSub(Det00, Det01); Result = Builder.CreateFAdd(Result, Det02); return Result; } static Value *determinant4x4(Value *M00, Value *M01, Value *M02, Value *M03, Value *M10, Value *M11, Value *M12, Value *M13, Value *M20, Value *M21, Value *M22, Value *M23, Value *M30, Value *M31, Value *M32, Value *M33, IRBuilder<> &Builder) { Value *Det00 = determinant3x3(M11, M12, M13, M21, M22, M23, M31, M32, M33, Builder); Value *Det01 = determinant3x3(M10, M12, M13, M20, M22, M23, M30, M32, M33, Builder); Value *Det02 = determinant3x3(M10, M11, M13, M20, M21, M23, M30, M31, M33, Builder); Value *Det03 = determinant3x3(M10, M11, M12, M20, M21, M22, M30, M31, M32, Builder); Det00 = Builder.CreateFMul(M00, Det00); Det01 = Builder.CreateFMul(M01, Det01); Det02 = Builder.CreateFMul(M02, Det02); Det03 = Builder.CreateFMul(M03, Det03); Value *Result = Builder.CreateFSub(Det00, Det01); Result = Builder.CreateFAdd(Result, Det02); Result = Builder.CreateFSub(Result, Det03); return Result; } Value *HLMatrixLowerPass::lowerHLDeterminantIntrinsic(Value *MatVal, IRBuilder<> &Builder) { HLMatrixType MatTy = HLMatrixType::cast(MatVal->getType()); DXASSERT_NOMSG(MatTy.getNumColumns() == MatTy.getNumRows()); Value *LoweredVal = getLoweredByValOperand(MatVal, Builder); // Extract all matrix elements SmallVector<Value *, 16> Elems; for (unsigned ElemIdx = 0; ElemIdx < 
MatTy.getNumElements(); ++ElemIdx) Elems.emplace_back(Builder.CreateExtractElement( LoweredVal, static_cast<uint64_t>(ElemIdx))); // Delegate to appropriate determinant function switch (MatTy.getNumColumns()) { case 1: return Elems[0]; case 2: return determinant2x2(Elems[0], Elems[1], Elems[2], Elems[3], Builder); case 3: return determinant3x3(Elems[0], Elems[1], Elems[2], Elems[3], Elems[4], Elems[5], Elems[6], Elems[7], Elems[8], Builder); case 4: return determinant4x4(Elems[0], Elems[1], Elems[2], Elems[3], Elems[4], Elems[5], Elems[6], Elems[7], Elems[8], Elems[9], Elems[10], Elems[11], Elems[12], Elems[13], Elems[14], Elems[15], Builder); default: llvm_unreachable("Unexpected matrix dimensions."); } } Value *HLMatrixLowerPass::lowerHLUnaryOperation(Value *MatVal, HLUnaryOpcode Opcode, IRBuilder<> &Builder) { Value *LoweredVal = getLoweredByValOperand(MatVal, Builder); VectorType *VecTy = cast<VectorType>(LoweredVal->getType()); bool IsFloat = VecTy->getElementType()->isFloatingPointTy(); switch (Opcode) { case HLUnaryOpcode::Plus: return LoweredVal; // No-op case HLUnaryOpcode::Minus: return IsFloat ? Builder.CreateFSub(Constant::getNullValue(VecTy), LoweredVal) : Builder.CreateSub(Constant::getNullValue(VecTy), LoweredVal); case HLUnaryOpcode::LNot: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_UEQ, LoweredVal, Constant::getNullValue(VecTy)) : Builder.CreateICmp(CmpInst::ICMP_EQ, LoweredVal, Constant::getNullValue(VecTy)); case HLUnaryOpcode::Not: return Builder.CreateXor(LoweredVal, Constant::getAllOnesValue(VecTy)); case HLUnaryOpcode::PostInc: case HLUnaryOpcode::PreInc: case HLUnaryOpcode::PostDec: case HLUnaryOpcode::PreDec: { Constant *ScalarOne = IsFloat ? ConstantFP::get(VecTy->getElementType(), 1) : ConstantInt::get(VecTy->getElementType(), 1); Constant *VecOne = ConstantVector::getSplat(VecTy->getNumElements(), ScalarOne); // CodeGen already emitted the load and following store, our job is only to // produce the updated value. if (Opcode == HLUnaryOpcode::PostInc || Opcode == HLUnaryOpcode::PreInc) { return IsFloat ? Builder.CreateFAdd(LoweredVal, VecOne) : Builder.CreateAdd(LoweredVal, VecOne); } else { return IsFloat ? Builder.CreateFSub(LoweredVal, VecOne) : Builder.CreateSub(LoweredVal, VecOne); } } default: llvm_unreachable("Unsupported unary matrix operator"); } } Value *HLMatrixLowerPass::lowerHLBinaryOperation(Value *Lhs, Value *Rhs, HLBinaryOpcode Opcode, IRBuilder<> &Builder) { Value *LoweredLhs = getLoweredByValOperand(Lhs, Builder); Value *LoweredRhs = getLoweredByValOperand(Rhs, Builder); DXASSERT(LoweredLhs->getType()->isVectorTy() && LoweredRhs->getType()->isVectorTy(), "Expected lowered binary operation operands to be vectors"); DXASSERT( LoweredLhs->getType() == LoweredRhs->getType(), "Expected lowered binary operation operands to have matching types."); FixedVectorType *VT = dyn_cast<FixedVectorType>(LoweredLhs->getType()); bool IsFloat = VT->getElementType()->isFloatingPointTy(); switch (Opcode) { case HLBinaryOpcode::Add: return IsFloat ? Builder.CreateFAdd(LoweredLhs, LoweredRhs) : Builder.CreateAdd(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Sub: return IsFloat ? Builder.CreateFSub(LoweredLhs, LoweredRhs) : Builder.CreateSub(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Mul: return IsFloat ? Builder.CreateFMul(LoweredLhs, LoweredRhs) : Builder.CreateMul(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Div: return IsFloat ? 
Builder.CreateFDiv(LoweredLhs, LoweredRhs) : Builder.CreateSDiv(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Rem: return IsFloat ? Builder.CreateFRem(LoweredLhs, LoweredRhs) : Builder.CreateSRem(LoweredLhs, LoweredRhs); case HLBinaryOpcode::And: return Builder.CreateAnd(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Or: return Builder.CreateOr(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Xor: return Builder.CreateXor(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Shl: return Builder.CreateShl(LoweredLhs, LoweredRhs); case HLBinaryOpcode::Shr: return Builder.CreateAShr(LoweredLhs, LoweredRhs); case HLBinaryOpcode::LT: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_OLT, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_SLT, LoweredLhs, LoweredRhs); case HLBinaryOpcode::GT: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_OGT, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_SGT, LoweredLhs, LoweredRhs); case HLBinaryOpcode::LE: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_OLE, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_SLE, LoweredLhs, LoweredRhs); case HLBinaryOpcode::GE: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_OGE, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_SGE, LoweredLhs, LoweredRhs); case HLBinaryOpcode::EQ: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_OEQ, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_EQ, LoweredLhs, LoweredRhs); case HLBinaryOpcode::NE: return IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_ONE, LoweredLhs, LoweredRhs) : Builder.CreateICmp(CmpInst::ICMP_NE, LoweredLhs, LoweredRhs); case HLBinaryOpcode::UDiv: return Builder.CreateUDiv(LoweredLhs, LoweredRhs); case HLBinaryOpcode::URem: return Builder.CreateURem(LoweredLhs, LoweredRhs); case HLBinaryOpcode::UShr: return Builder.CreateLShr(LoweredLhs, LoweredRhs); case HLBinaryOpcode::ULT: return Builder.CreateICmp(CmpInst::ICMP_ULT, LoweredLhs, LoweredRhs); case HLBinaryOpcode::UGT: return Builder.CreateICmp(CmpInst::ICMP_UGT, LoweredLhs, LoweredRhs); case HLBinaryOpcode::ULE: return Builder.CreateICmp(CmpInst::ICMP_ULE, LoweredLhs, LoweredRhs); case HLBinaryOpcode::UGE: return Builder.CreateICmp(CmpInst::ICMP_UGE, LoweredLhs, LoweredRhs); case HLBinaryOpcode::LAnd: case HLBinaryOpcode::LOr: { Value *Zero = Constant::getNullValue(LoweredLhs->getType()); Value *LhsCmp = IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_ONE, LoweredLhs, Zero) : Builder.CreateICmp(CmpInst::ICMP_NE, LoweredLhs, Zero); Value *RhsCmp = IsFloat ? Builder.CreateFCmp(CmpInst::FCMP_ONE, LoweredRhs, Zero) : Builder.CreateICmp(CmpInst::ICMP_NE, LoweredRhs, Zero); return Opcode == HLBinaryOpcode::LOr ? 
Builder.CreateOr(LhsCmp, RhsCmp) : Builder.CreateAnd(LhsCmp, RhsCmp); } default: llvm_unreachable("Unsupported binary matrix operator"); } } Value *HLMatrixLowerPass::lowerHLLoadStore(CallInst *Call, HLMatLoadStoreOpcode Opcode) { IRBuilder<> Builder(Call); switch (Opcode) { case HLMatLoadStoreOpcode::RowMatLoad: case HLMatLoadStoreOpcode::ColMatLoad: return lowerHLLoad( Call, Call->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx), /* RowMajor */ Opcode == HLMatLoadStoreOpcode::RowMatLoad, Builder); case HLMatLoadStoreOpcode::RowMatStore: case HLMatLoadStoreOpcode::ColMatStore: return lowerHLStore( Call, Call->getArgOperand(HLOperandIndex::kMatStoreValOpIdx), Call->getArgOperand(HLOperandIndex::kMatStoreDstPtrOpIdx), /* RowMajor */ Opcode == HLMatLoadStoreOpcode::RowMatStore, /* Return */ !Call->getType()->isVoidTy(), Builder); default: llvm_unreachable("Unsupported matrix load/store operation"); } } Value *HLMatrixLowerPass::lowerHLLoad(CallInst *Call, Value *MatPtr, bool RowMajor, IRBuilder<> &Builder) { HLMatrixType MatTy = HLMatrixType::cast(MatPtr->getType()->getPointerElementType()); Value *LoweredPtr = tryGetLoweredPtrOperand(MatPtr, Builder); if (LoweredPtr == nullptr) { // Can't lower this here, defer to HL signature lower HLMatLoadStoreOpcode Opcode = RowMajor ? HLMatLoadStoreOpcode::RowMatLoad : HLMatLoadStoreOpcode::ColMatLoad; return callHLFunction( *m_pModule, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(Opcode), MatTy.getLoweredVectorTypeForReg(), {Builder.getInt32((uint32_t)Opcode), MatPtr}, Call->getCalledFunction()->getAttributes().getFnAttributes(), Builder); } return MatTy.emitLoweredLoad(LoweredPtr, Builder); } Value *HLMatrixLowerPass::lowerHLStore(CallInst *Call, Value *MatVal, Value *MatPtr, bool RowMajor, bool Return, IRBuilder<> &Builder) { DXASSERT(MatVal->getType() == MatPtr->getType()->getPointerElementType(), "Matrix store value/pointer type mismatch."); Value *LoweredPtr = tryGetLoweredPtrOperand(MatPtr, Builder); Value *LoweredVal = getLoweredByValOperand(MatVal, Builder); if (LoweredPtr == nullptr) { // Can't lower the pointer here, defer to HL signature lower HLMatLoadStoreOpcode Opcode = RowMajor ? HLMatLoadStoreOpcode::RowMatStore : HLMatLoadStoreOpcode::ColMatStore; return callHLFunction( *m_pModule, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(Opcode), Return ? LoweredVal->getType() : Builder.getVoidTy(), {Builder.getInt32((uint32_t)Opcode), MatPtr, LoweredVal}, Call->getCalledFunction()->getAttributes().getFnAttributes(), Builder); } HLMatrixType MatTy = HLMatrixType::cast(MatPtr->getType()->getPointerElementType()); StoreInst *LoweredStore = MatTy.emitLoweredStore(LoweredVal, LoweredPtr, Builder); // If the intrinsic returned a value, return the stored lowered value return Return ? LoweredVal : LoweredStore; } static Value *convertScalarOrVector(Value *SrcVal, Type *DstTy, HLCastOpcode Opcode, IRBuilder<> Builder) { DXASSERT(SrcVal->getType()->isVectorTy() == DstTy->isVectorTy(), "Scalar/vector type mismatch in numerical conversion."); Type *SrcTy = SrcVal->getType(); // Conversions between equivalent types are no-ops, // even between signed/unsigned variants. if (SrcTy == DstTy) return SrcVal; // Conversions to bools are comparisons if (DstTy->getScalarSizeInBits() == 1) { // fcmp une is what regular clang uses in C++ for (bool)f; return SrcTy->isIntOrIntVectorTy() ? 
Builder.CreateICmpNE( SrcVal, llvm::Constant::getNullValue(SrcTy), "tobool") : Builder.CreateFCmpUNE( SrcVal, llvm::Constant::getNullValue(SrcTy), "tobool"); } // Cast necessary bool SrcIsUnsigned = Opcode == HLCastOpcode::FromUnsignedCast || Opcode == HLCastOpcode::UnsignedUnsignedCast; bool DstIsUnsigned = Opcode == HLCastOpcode::ToUnsignedCast || Opcode == HLCastOpcode::UnsignedUnsignedCast; auto CastOp = static_cast<Instruction::CastOps>( HLModule::GetNumericCastOp(SrcTy, SrcIsUnsigned, DstTy, DstIsUnsigned)); return Builder.CreateCast(CastOp, SrcVal, DstTy); } Value *HLMatrixLowerPass::lowerHLCast(CallInst *Call, Value *Src, Type *DstTy, HLCastOpcode Opcode, IRBuilder<> &Builder) { // The opcode really doesn't mean much here, the types involved are what drive // most of the casting. DXASSERT(Opcode != HLCastOpcode::HandleToResCast, "Unexpected matrix cast opcode."); if (dxilutil::IsIntegerOrFloatingPointType(Src->getType())) { // Scalar to matrix splat HLMatrixType MatDstTy = HLMatrixType::cast(DstTy); // Apply element conversion Value *Result = convertScalarOrVector(Src, MatDstTy.getElementTypeForReg(), Opcode, Builder); // Splat to a vector Result = Builder.CreateInsertElement( UndefValue::get(VectorType::get(Result->getType(), 1)), Result, static_cast<uint64_t>(0)); return Builder.CreateShuffleVector( Result, Result, ConstantVector::getSplat(MatDstTy.getNumElements(), Builder.getInt32(0))); } else if (VectorType *SrcVecTy = dyn_cast<VectorType>(Src->getType())) { // Vector to matrix HLMatrixType MatDstTy = HLMatrixType::cast(DstTy); Value *Result = Src; // We might need to truncate if (MatDstTy.getNumElements() < SrcVecTy->getNumElements()) { SmallVector<int, 4> ShuffleIndices; for (unsigned Idx = 0; Idx < MatDstTy.getNumElements(); ++Idx) ShuffleIndices.emplace_back(static_cast<int>(Idx)); Result = Builder.CreateShuffleVector(Src, Src, ShuffleIndices); } // Apply element conversion return convertScalarOrVector(Result, MatDstTy.getLoweredVectorTypeForReg(), Opcode, Builder); } // Source must now be a matrix HLMatrixType MatSrcTy = HLMatrixType::cast(Src->getType()); VectorType *LoweredSrcTy = MatSrcTy.getLoweredVectorTypeForReg(); Value *LoweredSrc; if (isa<Argument>(Src)) { // Function arguments are lowered in HLSignatureLower. // Initial codegen first generates those cast intrinsics to tell us how to // lower them into vectors. Preserve them, but change the return type to // vector. 
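  // Illustrative (assumed HL intrinsic name): for an entry argument
  // %arg : %class.matrix.float.4.4, the cast call survives with a vector
  // return type for HLSignatureLower to consume:
  //   %vec = call <16 x float> @"dx.hl.cast..."(i32 <RowMatrixToVecCast>,
  //                                             %class.matrix.float.4.4 %arg)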
DXASSERT(Opcode == HLCastOpcode::ColMatrixToVecCast || Opcode == HLCastOpcode::RowMatrixToVecCast, "Unexpected cast of matrix argument."); LoweredSrc = callHLFunction( *m_pModule, HLOpcodeGroup::HLCast, static_cast<unsigned>(Opcode), LoweredSrcTy, {Builder.getInt32((uint32_t)Opcode), Src}, Call->getCalledFunction()->getAttributes().getFnAttributes(), Builder); } else { LoweredSrc = getLoweredByValOperand(Src, Builder); } DXASSERT_NOMSG(LoweredSrc->getType() == LoweredSrcTy); Value *Result = LoweredSrc; Type *LoweredDstTy = DstTy; if (dxilutil::IsIntegerOrFloatingPointType(DstTy)) { // Matrix to scalar Result = Builder.CreateExtractElement(LoweredSrc, static_cast<uint64_t>(0)); } else if (FixedVectorType *DstVecTy = dyn_cast<FixedVectorType>(DstTy)) { // Matrix to vector DXASSERT(DstVecTy->getNumElements() <= LoweredSrcTy->getNumElements(), "Cannot cast matrix to a larger vector."); // We might have to truncate if (DstVecTy->getNumElements() < LoweredSrcTy->getNumElements()) { SmallVector<int, 3> ShuffleIndices; for (unsigned Idx = 0; Idx < DstVecTy->getNumElements(); ++Idx) ShuffleIndices.emplace_back(static_cast<int>(Idx)); Result = Builder.CreateShuffleVector(Result, Result, ShuffleIndices); } } else { // Destination must now be a matrix too HLMatrixType MatDstTy = HLMatrixType::cast(DstTy); // Apply any changes at the matrix level: orientation changes and truncation if (Opcode == HLCastOpcode::ColMatrixToRowMatrix) Result = MatSrcTy.emitLoweredVectorColToRow(Result, Builder); else if (Opcode == HLCastOpcode::RowMatrixToColMatrix) Result = MatSrcTy.emitLoweredVectorRowToCol(Result, Builder); else if (MatDstTy.getNumRows() != MatSrcTy.getNumRows() || MatDstTy.getNumColumns() != MatSrcTy.getNumColumns()) { // Apply truncation DXASSERT(MatDstTy.getNumRows() <= MatSrcTy.getNumRows() && MatDstTy.getNumColumns() <= MatSrcTy.getNumColumns(), "Unexpected matrix cast between incompatible dimensions."); SmallVector<int, 16> ShuffleIndices; for (unsigned RowIdx = 0; RowIdx < MatDstTy.getNumRows(); ++RowIdx) for (unsigned ColIdx = 0; ColIdx < MatDstTy.getNumColumns(); ++ColIdx) ShuffleIndices.emplace_back( static_cast<int>(MatSrcTy.getRowMajorIndex(RowIdx, ColIdx))); Result = Builder.CreateShuffleVector(Result, Result, ShuffleIndices); } LoweredDstTy = MatDstTy.getLoweredVectorTypeForReg(); DXASSERT(cast<FixedVectorType>(Result->getType())->getNumElements() == cast<FixedVectorType>(LoweredDstTy)->getNumElements(), "Unexpected matrix src/dst lowered element count mismatch after " "truncation."); } // Apply element conversion return convertScalarOrVector(Result, LoweredDstTy, Opcode, Builder); } Value *HLMatrixLowerPass::lowerHLSubscript(CallInst *Call, HLSubscriptOpcode Opcode) { switch (Opcode) { case HLSubscriptOpcode::RowMatElement: case HLSubscriptOpcode::ColMatElement: return lowerHLMatElementSubscript(Call, /* RowMajor */ Opcode == HLSubscriptOpcode::RowMatElement); case HLSubscriptOpcode::RowMatSubscript: case HLSubscriptOpcode::ColMatSubscript: return lowerHLMatSubscript(Call, /* RowMajor */ Opcode == HLSubscriptOpcode::RowMatSubscript); case HLSubscriptOpcode::DefaultSubscript: case HLSubscriptOpcode::CBufferSubscript: // Those get lowered during HLOperationLower, // and the return type must stay unchanged (as a matrix) // to provide the metadata to properly emit the loads. 
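    // Illustrative: a constant-buffer access such as `cb.worldMat[1]` keeps
    // its matrix-typed CBufferSubscript here; HLOperationLower later turns
    // it into the proper legacy-layout cbuffer row loads.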
return nullptr; default: llvm_unreachable("Unexpected matrix subscript opcode."); } } Value *HLMatrixLowerPass::lowerHLMatElementSubscript(CallInst *Call, bool RowMajor) { (void)RowMajor; // It doesn't look like we actually need this? Value *MatPtr = Call->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx); Constant *IdxVec = cast<Constant>( Call->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx)); VectorType *IdxVecTy = cast<VectorType>(IdxVec->getType()); // Get the loaded lowered vector element indices SmallVector<Value *, 4> ElemIndices; ElemIndices.reserve(IdxVecTy->getNumElements()); for (unsigned VecIdx = 0; VecIdx < IdxVecTy->getNumElements(); ++VecIdx) { ElemIndices.emplace_back(IdxVec->getAggregateElement(VecIdx)); } lowerHLMatSubscript(Call, MatPtr, ElemIndices); // We did our own replacement of uses, opting out of having the caller do it // for us. return nullptr; } Value *HLMatrixLowerPass::lowerHLMatSubscript(CallInst *Call, bool RowMajor) { (void)RowMajor; // It doesn't look like we actually need this? Value *MatPtr = Call->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx); // Gather the indices, checking if they are all constant SmallVector<Value *, 4> ElemIndices; for (unsigned Idx = HLOperandIndex::kMatSubscriptSubOpIdx; Idx < Call->getNumArgOperands(); ++Idx) { ElemIndices.emplace_back(Call->getArgOperand(Idx)); } lowerHLMatSubscript(Call, MatPtr, ElemIndices); // We did our own replacement of uses, opting out of having the caller do it // for us. return nullptr; } void HLMatrixLowerPass::lowerHLMatSubscript( CallInst *Call, Value *MatPtr, SmallVectorImpl<Value *> &ElemIndices) { DXASSERT_NOMSG(HLMatrixType::isMatrixPtr(MatPtr->getType())); IRBuilder<> CallBuilder(Call); Value *LoweredPtr = tryGetLoweredPtrOperand(MatPtr, CallBuilder); Value *LoweredMatrix = nullptr; Value *RootPtr = LoweredPtr ? LoweredPtr : MatPtr; while (GEPOperator *GEP = dyn_cast<GEPOperator>(RootPtr)) RootPtr = GEP->getPointerOperand(); if (LoweredPtr == nullptr) { if (!isa<Argument>(RootPtr)) return; // For a shader input, load the matrix into a lowered ptr // The load will be handled by LowerSignature HLMatLoadStoreOpcode Opcode = (HLSubscriptOpcode)GetHLOpcode(Call) == HLSubscriptOpcode::RowMatSubscript ? HLMatLoadStoreOpcode::RowMatLoad : HLMatLoadStoreOpcode::ColMatLoad; HLMatrixType MatTy = HLMatrixType::cast(MatPtr->getType()->getPointerElementType()); // Don't pass attributes from subscript (ReadNone) - load is ReadOnly. // Attributes will be set when HL function is created. // FIXME: This seems to indicate a potential bug, since the load should be // placed where pointer users would have loaded from the pointer. LoweredMatrix = callHLFunction( *m_pModule, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(Opcode), MatTy.getLoweredVectorTypeForReg(), {CallBuilder.getInt32((uint32_t)Opcode), MatPtr}, AttributeSet(), CallBuilder); } // For global variables, we can GEP directly into the lowered vector pointer. // This is necessary to support group shared memory atomics and the like. 
bool AllowLoweredPtrGEPs = isa<GlobalVariable>(RootPtr); // Just constructing this does all the work HLMatrixSubscriptUseReplacer UseReplacer(Call, LoweredPtr, LoweredMatrix, ElemIndices, AllowLoweredPtrGEPs, m_deadInsts); DXASSERT(Call->use_empty(), "Expected all matrix subscript uses to have been replaced."); addToDeadInsts(Call); } Value *HLMatrixLowerPass::lowerHLInit(CallInst *Call) { DXASSERT(GetHLOpcode(Call) == 0, "Unexpected matrix init opcode."); // Figure out the result type HLMatrixType MatTy = HLMatrixType::cast(Call->getType()); VectorType *LoweredTy = MatTy.getLoweredVectorTypeForReg(); // Handle the case produced by EmitHLSLFlatConversion where there's one // vector argument instead of scalar arguments. if (1 == Call->getNumArgOperands() - HLOperandIndex::kInitFirstArgOpIdx && Call->getArgOperand(HLOperandIndex::kInitFirstArgOpIdx) ->getType() ->isVectorTy()) { Value *LoweredVec = Call->getArgOperand(HLOperandIndex::kInitFirstArgOpIdx); DXASSERT(LoweredTy->getNumElements() == cast<FixedVectorType>(LoweredVec->getType())->getNumElements(), "Invalid matrix init argument vector element count."); return LoweredVec; } DXASSERT(LoweredTy->getNumElements() == Call->getNumArgOperands() - HLOperandIndex::kInitFirstArgOpIdx, "Invalid matrix init argument count."); // Build the result vector from the init args. // Both the args and the result vector are in row-major order, so no shuffling // is necessary. IRBuilder<> Builder(Call); Value *LoweredVec = UndefValue::get(LoweredTy); for (unsigned VecElemIdx = 0; VecElemIdx < LoweredTy->getNumElements(); ++VecElemIdx) { Value *ArgVal = Call->getArgOperand(HLOperandIndex::kInitFirstArgOpIdx + VecElemIdx); DXASSERT(dxilutil::IsIntegerOrFloatingPointType(ArgVal->getType()), "Expected only scalars in matrix initialization."); LoweredVec = Builder.CreateInsertElement(LoweredVec, ArgVal, static_cast<uint64_t>(VecElemIdx)); } return LoweredVec; } Value *HLMatrixLowerPass::lowerHLSelect(CallInst *Call) { DXASSERT(GetHLOpcode(Call) == 0, "Unexpected matrix select opcode."); Value *Cond = Call->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *TrueMat = Call->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *FalseMat = Call->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); DXASSERT(TrueMat->getType() == FalseMat->getType(), "Unexpected type mismatch between matrix ternary operator values."); #ifndef NDEBUG // Assert that if the condition is a matrix, it matches the dimensions of the // values if (HLMatrixType MatCondTy = HLMatrixType::dyn_cast(Cond->getType())) { HLMatrixType ValMatTy = HLMatrixType::cast(TrueMat->getType()); DXASSERT(MatCondTy.getNumRows() == ValMatTy.getNumRows() && MatCondTy.getNumColumns() == ValMatTy.getNumColumns(), "Unexpected mismatch between ternary operator condition and value " "matrix dimensions."); } #endif IRBuilder<> Builder(Call); Value *LoweredCond = getLoweredByValOperand(Cond, Builder); Value *LoweredTrueVec = getLoweredByValOperand(TrueMat, Builder); Value *LoweredFalseVec = getLoweredByValOperand(FalseMat, Builder); Value *Result = UndefValue::get(LoweredTrueVec->getType()); bool IsScalarCond = !LoweredCond->getType()->isVectorTy(); unsigned NumElems = cast<FixedVectorType>(Result->getType())->getNumElements(); for (uint64_t ElemIdx = 0; ElemIdx < NumElems; ++ElemIdx) { Value *ElemCond = IsScalarCond ? 
LoweredCond : Builder.CreateExtractElement(LoweredCond, ElemIdx); Value *ElemTrueVal = Builder.CreateExtractElement(LoweredTrueVec, ElemIdx); Value *ElemFalseVal = Builder.CreateExtractElement(LoweredFalseVec, ElemIdx); Value *ResultElem = Builder.CreateSelect(ElemCond, ElemTrueVal, ElemFalseVal); Result = Builder.CreateInsertElement(Result, ResultElem, ElemIdx); } return Result; }
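To make the select-lowering shape above concrete, here is a minimal standalone sketch of the per-lane pattern that `lowerHLSelect` emits when the condition is a vector. `buildVectorSelect` is a hypothetical helper written against stock LLVM `IRBuilder`; it is not a function from this pass.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical illustration (not part of HLMatrixLowerPass): given lowered
// condition/true/false vectors of the same element count, build the result
// one lane at a time, mirroring the loop at the end of lowerHLSelect.
static Value *buildVectorSelect(IRBuilder<> &Builder, Value *CondVec,
                                Value *TrueVec, Value *FalseVec,
                                unsigned NumElems) {
  Value *Result = UndefValue::get(TrueVec->getType());
  for (uint64_t Idx = 0; Idx < NumElems; ++Idx) {
    Value *C = Builder.CreateExtractElement(CondVec, Idx); // i1 lane
    Value *T = Builder.CreateExtractElement(TrueVec, Idx);
    Value *F = Builder.CreateExtractElement(FalseVec, Idx);
    // Select per lane, then rebuild the result vector lane by lane.
    Result = Builder.CreateInsertElement(Result,
                                         Builder.CreateSelect(C, T, F), Idx);
  }
  return Result;
}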
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilRenameResourcesPass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRenameResourcesPass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" using namespace llvm; using namespace hlsl; // Rename resources with prefix namespace { class DxilRenameResources : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilRenameResources() : ModulePass(ID) {} void applyOptions(PassOptions O) override { GetPassOptionBool(O, "from-binding", &m_bFromBinding, false); GetPassOptionBool(O, "keep-name", &m_bKeepName, true); StringRef prefix; GetPassOption(O, "prefix", &prefix); m_Prefix = prefix.str(); } StringRef getPassName() const override { return "DXIL rename resources"; } bool runOnModule(Module &M) override { DxilModule &DM = M.GetOrCreateDxilModule(); bool bChanged = false; if (m_bFromBinding) { bChanged = DM.RenameResourceGlobalsWithBinding(m_bKeepName); } if (!m_Prefix.empty()) { bChanged |= DM.RenameResourcesWithPrefix(m_Prefix); } if (bChanged) { DM.ReEmitDxilResources(); } return bChanged; } private: bool m_bFromBinding; bool m_bKeepName; std::string m_Prefix; }; char DxilRenameResources::ID = 0; } // namespace ModulePass *llvm::createDxilRenameResourcesPass() { return new DxilRenameResources(); } INITIALIZE_PASS(DxilRenameResources, "dxil-rename-resources", "DXIL rename resources", false, false)
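For context, a minimal sketch of scheduling this pass over an already-loaded module with the legacy pass manager. `renameModuleResources` is a hypothetical driver, and the factory declaration is assumed to come from the included DxilGenerationPass.h; in practice the `from-binding`/`keep-name`/`prefix` options are supplied through the pass-manager option string that feeds `applyOptions`, not set directly.

#include "dxc/HLSL/DxilGenerationPass.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"

// Hypothetical driver (not part of this file): run the rename pass with its
// default options (from-binding=false, keep-name=true, empty prefix).
static bool renameModuleResources(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createDxilRenameResourcesPass());
  return PM.run(M); // true if the module was modified
}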
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilContainerReflection.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilContainerReflection.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides support for reading DXIL container structures. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilCounters.h" #include "dxc/DXIL/DxilFunctionProps.h" #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilPDB.h" #include "dxc/DXIL/DxilShaderModel.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/DxilContainer/DxilContainer.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/Unicode.h" #include "dxc/Support/WinIncludes.h" #include "dxc/Support/dxcapi.impl.h" #include "dxc/Support/microcom.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Operator.h" #include "llvm/ADT/SetVector.h" #include <unordered_set> #include "dxc/dxcapi.h" #include "dxc/Support/D3DReflection.h" #ifdef _WIN32 #include "d3d11shader.h" // for compatibility #else // Dummy D3D11 struct to allow nix-dead code to compile struct D3D11_SHADER_INPUT_BIND_DESC { int dummy; }; #include "dxc/WinAdapter.h" #endif #include "dxc/DxilContainer/DxilRuntimeReflection.h" // Remove this workaround once newer version of d3dcommon.h can be compiled // against #define ADD_16_64_BIT_TYPES #define ADD_SVC_BIT_FIELD const GUID IID_ID3D11ShaderReflection_43 = { 0x0a233719, 0x3960, 0x4578, {0x9d, 0x7c, 0x20, 0x3b, 0x8b, 0x1d, 0x9c, 0xc1}}; const GUID IID_ID3D11ShaderReflection_47 = { 0x8d536ca1, 0x0cca, 0x4956, {0xa8, 0x37, 0x78, 0x69, 0x63, 0x75, 0x55, 0x84}}; using namespace llvm; using namespace hlsl; using namespace hlsl::DXIL; class DxilContainerReflection : public IDxcContainerReflection { private: DXC_MICROCOM_TM_REF_FIELDS() CComPtr<IDxcBlob> m_container; const DxilContainerHeader *m_pHeader = nullptr; uint32_t m_headerLen = 0; bool IsLoaded() const { return m_pHeader != nullptr; } public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(DxilContainerReflection) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IDxcContainerReflection>(this, iid, ppvObject); } HRESULT STDMETHODCALLTYPE Load(IDxcBlob *pContainer) override; HRESULT STDMETHODCALLTYPE GetPartCount(UINT32 *pResult) override; HRESULT STDMETHODCALLTYPE GetPartKind(UINT32 idx, UINT32 *pResult) override; HRESULT STDMETHODCALLTYPE GetPartContent(UINT32 idx, IDxcBlob **ppResult) override; HRESULT STDMETHODCALLTYPE FindFirstPartKind(UINT32 kind, UINT32 *pResult) override; HRESULT STDMETHODCALLTYPE GetPartReflection(UINT32 idx, REFIID iid, void **ppvObject) override; }; class CShaderReflectionConstantBuffer; class CShaderReflectionType; enum class PublicAPI { D3D12 = 0, D3D11_47 = 1, D3D11_43 = 2, Invalid }; #ifdef ADD_16_64_BIT_TYPES // Disable warning about value not being valid in enum #pragma warning(disable : 4063) #define D3D_SVT_INT16 ((D3D_SHADER_VARIABLE_TYPE)58) #define D3D_SVT_UINT16 ((D3D_SHADER_VARIABLE_TYPE)59) #define D3D_SVT_FLOAT16 ((D3D_SHADER_VARIABLE_TYPE)60) #define D3D_SVT_INT64 ((D3D_SHADER_VARIABLE_TYPE)61) #define D3D_SVT_UINT64 
((D3D_SHADER_VARIABLE_TYPE)62) #endif // ADD_16_64_BIT_TYPES #ifdef ADD_SVC_BIT_FIELD // Disable warning about value not being valid in enum #pragma warning(disable : 4063) // FIXME: remove the define once D3D_SVC_BIT_FIELD added into // D3D_SHADER_VARIABLE_CLASS. #define D3D_SVC_BIT_FIELD \ ((D3D_SHADER_VARIABLE_CLASS)(D3D_SVC_INTERFACE_POINTER + 1)) #endif class DxilModuleReflection { public: hlsl::RDAT::DxilRuntimeData m_RDAT; LLVMContext Context; std::unique_ptr<Module> m_pModule; // Must come after LLVMContext, otherwise // unique_ptr will over-delete. DxilModule *m_pDxilModule = nullptr; bool m_bUsageInMetadata = false; std::vector<std::unique_ptr<CShaderReflectionConstantBuffer>> m_CBs; std::vector<D3D12_SHADER_INPUT_BIND_DESC> m_Resources; std::vector<std::unique_ptr<CShaderReflectionType>> m_Types; // Key strings owned by CShaderReflectionConstantBuffer objects std::map<StringRef, UINT> m_CBsByName; // Due to the possibility of overlapping names between CB and other resources, // m_StructuredBufferCBsByName is the index into m_CBs corresponding to // StructuredBuffer resources, separately from CB resources. std::map<StringRef, UINT> m_StructuredBufferCBsByName; void CreateReflectionObjects(); void CreateReflectionObjectForResource(DxilResourceBase *R); HRESULT LoadRDAT(const DxilPartHeader *pPart); HRESULT LoadProgramHeader(const DxilProgramHeader *pProgramHeader); // Common code ID3D12ShaderReflectionConstantBuffer *_GetConstantBufferByIndex(UINT Index); ID3D12ShaderReflectionConstantBuffer *_GetConstantBufferByName(LPCSTR Name); HRESULT _GetResourceBindingDesc(UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc, PublicAPI api = PublicAPI::D3D12); ID3D12ShaderReflectionVariable *_GetVariableByName(LPCSTR Name); HRESULT _GetResourceBindingDescByName(LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc, PublicAPI api = PublicAPI::D3D12); }; class DxilShaderReflection : public DxilModuleReflection, public ID3D12ShaderReflection { private: DXC_MICROCOM_TM_REF_FIELDS() std::vector<D3D12_SIGNATURE_PARAMETER_DESC> m_InputSignature; std::vector<D3D12_SIGNATURE_PARAMETER_DESC> m_OutputSignature; std::vector<D3D12_SIGNATURE_PARAMETER_DESC> m_PatchConstantSignature; std::vector<std::unique_ptr<char[]>> m_UpperCaseNames; D3D12_SHADER_DESC m_Desc = {}; void SetCBufferUsage(); void CreateReflectionObjectsForSignature( const DxilSignature &Sig, std::vector<D3D12_SIGNATURE_PARAMETER_DESC> &Descs); LPCSTR CreateUpperCase(LPCSTR pValue); void MarkUsedSignatureElements(); void InitDesc(); public: PublicAPI m_PublicAPI; void SetPublicAPI(PublicAPI value) { m_PublicAPI = value; } static PublicAPI IIDToAPI(REFIID iid) { PublicAPI api = PublicAPI::Invalid; if (IsEqualIID(__uuidof(ID3D12ShaderReflection), iid)) api = PublicAPI::D3D12; else if (IsEqualIID(IID_ID3D11ShaderReflection_43, iid)) api = PublicAPI::D3D11_43; else if (IsEqualIID(IID_ID3D11ShaderReflection_47, iid)) api = PublicAPI::D3D11_47; return api; } DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(DxilShaderReflection) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) noexcept override { HRESULT hr = E_NOINTERFACE; // There is non-standard handling of QueryInterface: // - although everything uses the same vtable as ID3D12ShaderReflection, // there are differences in behavior depending on the API version, and // there are 3 of these - it's not just d3d11 vs d3d12. // - when the object is created the API version is fixed // - from that point on, this object can only be QI'd for the matching API // version. 
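// (Illustrative example of the above: an instance created for PublicAPI::D3D11_47 succeeds QI for IID_ID3D11ShaderReflection_47 or IUnknown, but returns E_NOINTERFACE for __uuidof(ID3D12ShaderReflection).) 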
PublicAPI api = IIDToAPI(iid); if (api == m_PublicAPI) { *ppvObject = static_cast<ID3D12ShaderReflection *>(this); this->AddRef(); hr = S_OK; } else if (IsEqualIID(__uuidof(IUnknown), iid)) { *ppvObject = static_cast<IUnknown *>(this); this->AddRef(); hr = S_OK; } return hr; } HRESULT Load(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart); // ID3D12ShaderReflection STDMETHODIMP GetDesc(D3D12_SHADER_DESC *pDesc) noexcept override; STDMETHODIMP_(ID3D12ShaderReflectionConstantBuffer *) GetConstantBufferByIndex(UINT Index) noexcept override; STDMETHODIMP_(ID3D12ShaderReflectionConstantBuffer *) GetConstantBufferByName(LPCSTR Name) noexcept override; STDMETHODIMP GetResourceBindingDesc(UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc) noexcept override; STDMETHODIMP GetInputParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept override; STDMETHODIMP GetOutputParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept override; STDMETHODIMP GetPatchConstantParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept override; STDMETHODIMP_(ID3D12ShaderReflectionVariable *) GetVariableByName(LPCSTR Name) noexcept override; STDMETHODIMP GetResourceBindingDescByName( LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc) noexcept override; STDMETHODIMP_(UINT) GetMovInstructionCount(THIS) noexcept override; STDMETHODIMP_(UINT) GetMovcInstructionCount(THIS) noexcept override; STDMETHODIMP_(UINT) GetConversionInstructionCount(THIS) noexcept override; STDMETHODIMP_(UINT) GetBitwiseInstructionCount(THIS) noexcept override; STDMETHODIMP_(D3D_PRIMITIVE) GetGSInputPrimitive(THIS) noexcept override; STDMETHODIMP_(BOOL) IsSampleFrequencyShader(THIS) noexcept override; STDMETHODIMP_(UINT) GetNumInterfaceSlots(THIS) noexcept override; STDMETHODIMP GetMinFeatureLevel(D3D_FEATURE_LEVEL *pLevel) noexcept override; STDMETHODIMP_(UINT) GetThreadGroupSize(UINT *pSizeX, UINT *pSizeY, UINT *pSizeZ) noexcept override; STDMETHODIMP_(UINT64) GetRequiresFlags(THIS) noexcept override; }; class CFunctionReflection; class DxilLibraryReflection : public DxilModuleReflection, public ID3D12LibraryReflection { private: DXC_MICROCOM_TM_REF_FIELDS() // Storage, and function by name: typedef DenseMap<StringRef, std::unique_ptr<CFunctionReflection>> FunctionMap; typedef DenseMap<const Function *, CFunctionReflection *> FunctionsByPtr; FunctionMap m_FunctionMap; FunctionsByPtr m_FunctionsByPtr; // Enable indexing into functions in deterministic order: std::vector<CFunctionReflection *> m_FunctionVector; void AddResourceDependencies(); void SetCBufferUsage(); public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(DxilLibraryReflection) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) noexcept override { return DoBasicQueryInterface<ID3D12LibraryReflection>(this, iid, ppvObject); } HRESULT Load(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart); // ID3D12LibraryReflection STDMETHOD(GetDesc)(D3D12_LIBRARY_DESC *pDesc) override; STDMETHOD_(ID3D12FunctionReflection *, GetFunctionByIndex) (INT FunctionIndex) override; }; namespace hlsl { HRESULT CreateDxilShaderReflection(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart, REFIID iid, void **ppvObject) { if (!ppvObject) return E_INVALIDARG; PublicAPI api = DxilShaderReflection::IIDToAPI(iid); if (api == PublicAPI::Invalid) { if (IsEqualIID(__uuidof(IUnknown), iid)) api = PublicAPI::D3D12; else return 
E_NOINTERFACE; } CComPtr<DxilShaderReflection> pReflection = DxilShaderReflection::Alloc(DxcGetThreadMallocNoRef()); IFROOM(pReflection.p); pReflection->SetPublicAPI(api); // pRDATPart to be used for transition. IFR(pReflection->Load(pProgramHeader, pRDATPart)); IFR(pReflection.p->QueryInterface(iid, ppvObject)); return S_OK; } HRESULT CreateDxilLibraryReflection(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart, REFIID iid, void **ppvObject) { if (!ppvObject) return E_INVALIDARG; if (!IsEqualIID(__uuidof(ID3D12LibraryReflection), iid) && !IsEqualIID(__uuidof(IUnknown), iid)) return E_NOINTERFACE; CComPtr<DxilLibraryReflection> pReflection = DxilLibraryReflection::Alloc(DxcGetThreadMallocNoRef()); IFROOM(pReflection.p); // pRDATPart used for resource usage per-function. IFR(pReflection->Load(pProgramHeader, pRDATPart)); IFR(pReflection.p->QueryInterface(iid, ppvObject)); return S_OK; } HRESULT CreateDxilShaderOrLibraryReflectionFromProgramHeader( const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart, REFIID iid, void **ppvObject) { // Detect whether library, or if unrecognized program version. DXIL::ShaderKind SK = GetVersionShaderType(pProgramHeader->ProgramVersion); if (!(SK < DXIL::ShaderKind::Invalid)) return E_INVALIDARG; bool bIsLibrary = DXIL::ShaderKind::Library == SK; if (bIsLibrary) { IFR(hlsl::CreateDxilLibraryReflection(pProgramHeader, pRDATPart, iid, ppvObject)); } else { IFR(hlsl::CreateDxilShaderReflection(pProgramHeader, pRDATPart, iid, ppvObject)); } return S_OK; } bool IsValidReflectionModulePart(DxilFourCC fourCC) { return fourCC == DFCC_DXIL || fourCC == DFCC_ShaderDebugInfoDXIL || fourCC == DFCC_ShaderStatistics; } HRESULT CreateDxilShaderOrLibraryReflectionFromModulePart( const DxilPartHeader *pModulePart, const DxilPartHeader *pRDATPart, REFIID iid, void **ppvObject) { if (!pModulePart) return E_INVALIDARG; if (!IsValidReflectionModulePart((DxilFourCC)pModulePart->PartFourCC)) return E_INVALIDARG; const DxilProgramHeader *pProgramHeader = reinterpret_cast<const DxilProgramHeader *>(GetDxilPartData(pModulePart)); if (!IsValidDxilProgramHeader(pProgramHeader, pModulePart->PartSize)) return E_INVALIDARG; // If bitcode is too small, it's probably been stripped, and we cannot create // reflection with it. 
if (pModulePart->PartSize - pProgramHeader->BitcodeHeader.BitcodeOffset < 4) return DXC_E_MISSING_PART; return CreateDxilShaderOrLibraryReflectionFromProgramHeader( pProgramHeader, pRDATPart, iid, ppvObject); } } // namespace hlsl HRESULT DxilContainerReflection::Load(IDxcBlob *pContainer) { if (pContainer == nullptr) { m_container.Release(); m_pHeader = nullptr; m_headerLen = 0; return S_OK; } CComPtr<IDxcBlob> pPDBContainer; try { DxcThreadMalloc DxcMalloc(m_pMalloc); CComPtr<IStream> pStream; IFR(hlsl::CreateReadOnlyBlobStream(pContainer, &pStream)); if (SUCCEEDED(hlsl::pdb::LoadDataFromStream(m_pMalloc, pStream, &pPDBContainer))) { pContainer = pPDBContainer; } } CATCH_CPP_RETURN_HRESULT(); uint32_t bufLen = pContainer->GetBufferSize(); const DxilContainerHeader *pHeader = IsDxilContainerLike(pContainer->GetBufferPointer(), bufLen); if (pHeader == nullptr) { return E_INVALIDARG; } if (!IsValidDxilContainer(pHeader, bufLen)) { return E_INVALIDARG; } m_container = pContainer; m_headerLen = bufLen; m_pHeader = pHeader; return S_OK; } HRESULT DxilContainerReflection::GetPartCount(UINT32 *pResult) { if (pResult == nullptr) return E_POINTER; if (!IsLoaded()) return E_NOT_VALID_STATE; *pResult = m_pHeader->PartCount; return S_OK; } HRESULT DxilContainerReflection::GetPartKind(UINT32 idx, UINT32 *pResult) { if (pResult == nullptr) return E_POINTER; if (!IsLoaded()) return E_NOT_VALID_STATE; if (idx >= m_pHeader->PartCount) return E_BOUNDS; const DxilPartHeader *pPart = GetDxilContainerPart(m_pHeader, idx); *pResult = pPart->PartFourCC; return S_OK; } HRESULT DxilContainerReflection::GetPartContent(UINT32 idx, IDxcBlob **ppResult) { if (ppResult == nullptr) return E_POINTER; *ppResult = nullptr; if (!IsLoaded()) return E_NOT_VALID_STATE; if (idx >= m_pHeader->PartCount) return E_BOUNDS; const DxilPartHeader *pPart = GetDxilContainerPart(m_pHeader, idx); const char *pData = GetDxilPartData(pPart); uint32_t offset = (uint32_t)(pData - (char *)m_container ->GetBufferPointer()); // Offset from the beginning. uint32_t length = pPart->PartSize; DxcThreadMalloc TM(m_pMalloc); return DxcCreateBlobFromBlob(m_container, offset, length, ppResult); } HRESULT DxilContainerReflection::FindFirstPartKind(UINT32 kind, UINT32 *pResult) { if (pResult == nullptr) return E_POINTER; *pResult = 0; if (!IsLoaded()) return E_NOT_VALID_STATE; DxilPartIterator it = std::find_if(begin(m_pHeader), end(m_pHeader), DxilPartIsType(kind)); if (it == end(m_pHeader)) return HRESULT_FROM_WIN32(ERROR_NOT_FOUND); *pResult = it.index; return S_OK; } HRESULT DxilContainerReflection::GetPartReflection(UINT32 idx, REFIID iid, void **ppvObject) { if (ppvObject == nullptr) return E_POINTER; *ppvObject = nullptr; if (!IsLoaded()) return E_NOT_VALID_STATE; if (idx >= m_pHeader->PartCount) return E_BOUNDS; const DxilPartHeader *pPart = GetDxilContainerPart(m_pHeader, idx); if (!hlsl::IsValidReflectionModulePart((hlsl::DxilFourCC)pPart->PartFourCC)) return E_NOTIMPL; // Use DFCC_ShaderStatistics for reflection instead of DXIL part, until switch // to using RDAT for reflection instead of module. 
const DxilPartHeader *pRDATPart = nullptr; for (idx = 0; idx < m_pHeader->PartCount; ++idx) { const DxilPartHeader *pPartTest = GetDxilContainerPart(m_pHeader, idx); if (pPartTest->PartFourCC == DFCC_RuntimeData) { pRDATPart = pPartTest; } if (pPart->PartFourCC != DFCC_ShaderStatistics) { if (pPartTest->PartFourCC == DFCC_ShaderStatistics) { const DxilProgramHeader *pProgramHeaderTest = reinterpret_cast<const DxilProgramHeader *>( GetDxilPartData(pPartTest)); if (IsValidDxilProgramHeader(pProgramHeaderTest, pPartTest->PartSize)) { pPart = pPartTest; continue; } } } } DxcThreadMalloc TM(m_pMalloc); HRESULT hr = S_OK; IFC(hlsl::CreateDxilShaderOrLibraryReflectionFromModulePart(pPart, pRDATPart, iid, ppvObject)); Cleanup: return hr; } void hlsl::CreateDxcContainerReflection(IDxcContainerReflection **ppResult) { CComPtr<DxilContainerReflection> pReflection = DxilContainerReflection::Alloc(DxcGetThreadMallocNoRef()); *ppResult = pReflection.Detach(); if (*ppResult == nullptr) throw std::bad_alloc(); } /////////////////////////////////////////////////////////////////////////////// // DxilShaderReflection implementation - helper objects. // class CShaderReflectionType; class CShaderReflectionVariable; class CShaderReflectionConstantBuffer; class CShaderReflection; struct D3D11_INTERNALSHADER_RESOURCE_DEF; class CShaderReflectionType final : public ID3D12ShaderReflectionType { friend class CShaderReflectionConstantBuffer; protected: D3D12_SHADER_TYPE_DESC m_Desc; UINT m_SizeInCBuffer; std::string m_Name; std::vector<StringRef> m_MemberNames; std::vector<CShaderReflectionType *> m_MemberTypes; CShaderReflectionType *m_pSubType; CShaderReflectionType *m_pBaseClass; std::vector<CShaderReflectionType *> m_Interfaces; ULONG_PTR m_Identity; public: // Internal HRESULT InitializeEmpty(); HRESULT Initialize(DxilModule &M, llvm::Type *type, DxilFieldAnnotation &typeAnnotation, unsigned int baseOffset, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool isCBuffer); // ID3D12ShaderReflectionType STDMETHOD(GetDesc)(D3D12_SHADER_TYPE_DESC *pDesc); STDMETHOD_(ID3D12ShaderReflectionType *, GetMemberTypeByIndex)(UINT Index); STDMETHOD_(ID3D12ShaderReflectionType *, GetMemberTypeByName)(LPCSTR Name); STDMETHOD_(LPCSTR, GetMemberTypeName)(UINT Index); STDMETHOD(IsEqual)(ID3D12ShaderReflectionType *pType); STDMETHOD_(ID3D12ShaderReflectionType *, GetSubType)(); STDMETHOD_(ID3D12ShaderReflectionType *, GetBaseClass)(); STDMETHOD_(UINT, GetNumInterfaces)(); STDMETHOD_(ID3D12ShaderReflectionType *, GetInterfaceByIndex)(UINT uIndex); STDMETHOD(IsOfType)(ID3D12ShaderReflectionType *pType); STDMETHOD(ImplementsInterface)(ID3D12ShaderReflectionType *pBase); bool CheckEqual(CShaderReflectionType *pOther) { return m_Identity == pOther->m_Identity; } UINT GetCBufferSize() { return m_SizeInCBuffer; } }; class CShaderReflectionVariable final : public ID3D12ShaderReflectionVariable { protected: D3D12_SHADER_VARIABLE_DESC m_Desc; CShaderReflectionType *m_pType; CShaderReflectionConstantBuffer *m_pBuffer; BYTE *m_pDefaultValue; public: void Initialize(CShaderReflectionConstantBuffer *pBuffer, D3D12_SHADER_VARIABLE_DESC *pDesc, CShaderReflectionType *pType, BYTE *pDefaultValue); LPCSTR GetName() { return m_Desc.Name; } // ID3D12ShaderReflectionVariable STDMETHOD(GetDesc)(D3D12_SHADER_VARIABLE_DESC *pDesc); STDMETHOD_(ID3D12ShaderReflectionType *, GetType)(); STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetBuffer)(); STDMETHOD_(UINT, GetInterfaceSlot)(UINT uArrayIndex); }; class CShaderReflectionConstantBuffer 
final : public ID3D12ShaderReflectionConstantBuffer { protected: D3D12_SHADER_BUFFER_DESC m_Desc; std::vector<CShaderReflectionVariable> m_Variables; // For StructuredBuffer arrays, Name will have [0] appended for each dimension // to match fxc behavior. std::string m_ReflectionName; public: CShaderReflectionConstantBuffer() = default; CShaderReflectionConstantBuffer(CShaderReflectionConstantBuffer &&other) { m_Desc = other.m_Desc; std::swap(m_Variables, other.m_Variables); } void Initialize(DxilModule &M, DxilCBuffer &CB, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool bUsageInMetadata); void InitializeStructuredBuffer( DxilModule &M, DxilResource &R, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes); void InitializeTBuffer( DxilModule &M, DxilResource &R, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool bUsageInMetadata); LPCSTR GetName() { return m_Desc.Name; } // ID3D12ShaderReflectionConstantBuffer STDMETHOD(GetDesc)(D3D12_SHADER_BUFFER_DESC *pDesc); STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByIndex)(UINT Index); STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByName)(LPCSTR Name); }; // Invalid type sentinel definitions class CInvalidSRType; class CInvalidSRVariable; class CInvalidSRConstantBuffer; class CInvalidSRLibraryFunction; class CInvalidSRFunctionParameter; class CInvalidSRType final : public ID3D12ShaderReflectionType { STDMETHOD(GetDesc)(D3D12_SHADER_TYPE_DESC *pDesc) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionType *, GetMemberTypeByIndex)(UINT Index); STDMETHOD_(ID3D12ShaderReflectionType *, GetMemberTypeByName)(LPCSTR Name); STDMETHOD_(LPCSTR, GetMemberTypeName)(UINT Index) { return "$Invalid"; } STDMETHOD(IsEqual)(ID3D12ShaderReflectionType *pType) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionType *, GetSubType)(); STDMETHOD_(ID3D12ShaderReflectionType *, GetBaseClass)(); STDMETHOD_(UINT, GetNumInterfaces)() { return 0; } STDMETHOD_(ID3D12ShaderReflectionType *, GetInterfaceByIndex)(UINT uIndex); STDMETHOD(IsOfType)(ID3D12ShaderReflectionType *pType) { return E_FAIL; } STDMETHOD(ImplementsInterface)(ID3D12ShaderReflectionType *pBase) { return E_FAIL; } }; static CInvalidSRType g_InvalidSRType; ID3D12ShaderReflectionType *CInvalidSRType::GetMemberTypeByIndex(UINT) { return &g_InvalidSRType; } ID3D12ShaderReflectionType *CInvalidSRType::GetMemberTypeByName(LPCSTR) { return &g_InvalidSRType; } ID3D12ShaderReflectionType *CInvalidSRType::GetSubType() { return &g_InvalidSRType; } ID3D12ShaderReflectionType *CInvalidSRType::GetBaseClass() { return &g_InvalidSRType; } ID3D12ShaderReflectionType *CInvalidSRType::GetInterfaceByIndex(UINT) { return &g_InvalidSRType; } class CInvalidSRVariable final : public ID3D12ShaderReflectionVariable { STDMETHOD(GetDesc)(D3D12_SHADER_VARIABLE_DESC *pDesc) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionType *, GetType)() { return &g_InvalidSRType; } STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetBuffer)(); STDMETHOD_(UINT, GetInterfaceSlot)(UINT uIndex) { return UINT_MAX; } }; static CInvalidSRVariable g_InvalidSRVariable; class CInvalidSRConstantBuffer final : public ID3D12ShaderReflectionConstantBuffer { STDMETHOD(GetDesc)(D3D12_SHADER_BUFFER_DESC *pDesc) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByIndex)(UINT Index) { return &g_InvalidSRVariable; } STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByName)(LPCSTR Name) { return &g_InvalidSRVariable; } }; static CInvalidSRConstantBuffer 
g_InvalidSRConstantBuffer; class CInvalidFunctionParameter final : public ID3D12FunctionParameterReflection { STDMETHOD(GetDesc)(D3D12_PARAMETER_DESC *pDesc) { return E_FAIL; } }; CInvalidFunctionParameter g_InvalidFunctionParameter; class CInvalidFunction final : public ID3D12FunctionReflection { STDMETHOD(GetDesc)(D3D12_FUNCTION_DESC *pDesc) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetConstantBufferByIndex) (UINT BufferIndex) { return &g_InvalidSRConstantBuffer; } STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetConstantBufferByName) (LPCSTR Name) { return &g_InvalidSRConstantBuffer; } STDMETHOD(GetResourceBindingDesc) (UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc) { return E_FAIL; } STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByName)(LPCSTR Name) { return nullptr; } STDMETHOD(GetResourceBindingDescByName) (LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc) { return E_FAIL; } // Use D3D_RETURN_PARAMETER_INDEX to get description of the return value. STDMETHOD_(ID3D12FunctionParameterReflection *, GetFunctionParameter) (INT ParameterIndex) { return &g_InvalidFunctionParameter; } }; CInvalidFunction g_InvalidFunction; void CShaderReflectionVariable::Initialize( CShaderReflectionConstantBuffer *pBuffer, D3D12_SHADER_VARIABLE_DESC *pDesc, CShaderReflectionType *pType, BYTE *pDefaultValue) { m_pBuffer = pBuffer; memcpy(&m_Desc, pDesc, sizeof(m_Desc)); m_pType = pType; m_pDefaultValue = pDefaultValue; } HRESULT CShaderReflectionVariable::GetDesc(D3D12_SHADER_VARIABLE_DESC *pDesc) { if (!pDesc) return E_POINTER; memcpy(pDesc, &m_Desc, sizeof(m_Desc)); return S_OK; } ID3D12ShaderReflectionType *CShaderReflectionVariable::GetType() { return m_pType; } ID3D12ShaderReflectionConstantBuffer *CShaderReflectionVariable::GetBuffer() { return m_pBuffer; } UINT CShaderReflectionVariable::GetInterfaceSlot(UINT uArrayIndex) { return UINT_MAX; } ID3D12ShaderReflectionConstantBuffer *CInvalidSRVariable::GetBuffer() { return &g_InvalidSRConstantBuffer; } STDMETHODIMP CShaderReflectionType::GetDesc(D3D12_SHADER_TYPE_DESC *pDesc) { if (!pDesc) return E_POINTER; memcpy(pDesc, &m_Desc, sizeof(m_Desc)); return S_OK; } STDMETHODIMP_(ID3D12ShaderReflectionType *) CShaderReflectionType::GetMemberTypeByIndex(UINT Index) { if (Index >= m_MemberTypes.size()) { return &g_InvalidSRType; } return m_MemberTypes[Index]; } STDMETHODIMP_(LPCSTR) CShaderReflectionType::GetMemberTypeName(UINT Index) { if (Index >= m_MemberTypes.size()) { return nullptr; } return (LPCSTR)m_MemberNames[Index].bytes_begin(); } STDMETHODIMP_(ID3D12ShaderReflectionType *) CShaderReflectionType::GetMemberTypeByName(LPCSTR Name) { UINT memberCount = m_Desc.Members; for (UINT mm = 0; mm < memberCount; ++mm) { if (m_MemberNames[mm] == Name) { return m_MemberTypes[mm]; } } return nullptr; } STDMETHODIMP CShaderReflectionType::IsEqual(ID3D12ShaderReflectionType *pType) { // TODO: implement this check, if users actually depend on it return S_FALSE; } STDMETHODIMP_(ID3D12ShaderReflectionType *) CShaderReflectionType::GetSubType() { // TODO: implement `class`-related features, if requested return nullptr; } STDMETHODIMP_(ID3D12ShaderReflectionType *) CShaderReflectionType::GetBaseClass() { // TODO: implement `class`-related features, if requested return nullptr; } STDMETHODIMP_(UINT) CShaderReflectionType::GetNumInterfaces() { // HLSL interfaces have been deprecated return 0; } STDMETHODIMP_(ID3D12ShaderReflectionType *) CShaderReflectionType::GetInterfaceByIndex(UINT uIndex) { // HLSL interfaces have 
been deprecated return nullptr; } STDMETHODIMP CShaderReflectionType::IsOfType(ID3D12ShaderReflectionType *pType) { // TODO: implement `class`-related features, if requested return S_FALSE; } STDMETHODIMP CShaderReflectionType::ImplementsInterface(ID3D12ShaderReflectionType *pBase) { // HLSL interfaces have been deprecated return S_FALSE; } // Helper routine for types that don't have an obvious mapping // to the existing shader reflection interface. static bool ProcessUnhandledObjectType(llvm::StructType *structType, D3D_SHADER_VARIABLE_TYPE *outObjectType) { // Don't actually make this a hard error, but instead report the problem using // a suitable debug message. #ifndef NDEBUG OutputDebugFormatA( "DxilContainerReflection.cpp: error: unhandled object type '%s'.\n", structType->getName().str().c_str()); #endif *outObjectType = D3D_SVT_VOID; return true; } // Helper routine to try to detect if a type represents an HLSL "object" type // (a texture, sampler, buffer, etc.), and to extract the corresponding shader // reflection type. static bool TryToDetectObjectType(llvm::StructType *structType, D3D_SHADER_VARIABLE_TYPE *outObjectType) { // Note: This logic is largely duplicated from `dxilutil::IsHLSLObjectType` // with the addition of returning the appropriate reflection type tag. // // That logic looks error-prone, since it relies on string tests against // type names, including cases that just test against a prefix. // This code doesn't try to be any more robust. StringRef name = structType->getName(); if (name.startswith("dx.types.wave_t")) { return ProcessUnhandledObjectType(structType, outObjectType); } // Strip off some prefixes we are likely to see. name = name.ltrim("class."); name = name.ltrim("struct."); // Slice types occur as intermediates (they are not objects) if (name.endswith("_slice_type")) { return false; } // We might check for an exact name match, or a prefix match #define EXACT_MATCH(NAME, TAG) \ else if (name == #NAME) do { \ *outObjectType = TAG; \ return true; \ } \ while (0) #define PREFIX_MATCH(NAME, TAG) \ else if (name.startswith(#NAME)) do { \ *outObjectType = TAG; \ return true; \ } \ while (0) if (0) { } EXACT_MATCH(SamplerState, D3D_SVT_SAMPLER); EXACT_MATCH(SamplerComparisonState, D3D_SVT_SAMPLER); // Note: GS output stream types are not supported in the reflection interface. else if (name.startswith("TriangleStream")) { return ProcessUnhandledObjectType(structType, outObjectType); } else if (name.startswith("PointStream")) { return ProcessUnhandledObjectType(structType, outObjectType); } else if (name.startswith("LineStream")) { return ProcessUnhandledObjectType(structType, outObjectType); } PREFIX_MATCH(AppendStructuredBuffer, D3D_SVT_APPEND_STRUCTURED_BUFFER); PREFIX_MATCH(ConsumeStructuredBuffer, D3D_SVT_CONSUME_STRUCTURED_BUFFER); PREFIX_MATCH(ConstantBuffer, D3D_SVT_CBUFFER); // Note: the `HLModule` code does this trick to avoid checking more names // than it has to, but it doesn't seem 100% correct to do this. // TODO: consider just listing the `RasterizerOrdered` cases explicitly, // just as we do for the `RW` cases already. 
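// (Illustrative: after the ltrim below, "RasterizerOrderedByteAddressBuffer" becomes "ByteAddressBuffer" and takes the plain ByteAddressBuffer tag in the chain that follows.) 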
name = name.ltrim("RasterizerOrdered"); if (0) { } EXACT_MATCH(ByteAddressBuffer, D3D_SVT_BYTEADDRESS_BUFFER); EXACT_MATCH(RWByteAddressBuffer, D3D_SVT_RWBYTEADDRESS_BUFFER); PREFIX_MATCH(Buffer, D3D_SVT_BUFFER); PREFIX_MATCH(RWBuffer, D3D_SVT_RWBUFFER); PREFIX_MATCH(StructuredBuffer, D3D_SVT_STRUCTURED_BUFFER); PREFIX_MATCH(RWStructuredBuffer, D3D_SVT_RWSTRUCTURED_BUFFER); PREFIX_MATCH(Texture1D, D3D_SVT_TEXTURE1D); PREFIX_MATCH(RWTexture1D, D3D_SVT_RWTEXTURE1D); PREFIX_MATCH(Texture1DArray, D3D_SVT_TEXTURE1DARRAY); PREFIX_MATCH(RWTexture1DArray, D3D_SVT_RWTEXTURE1DARRAY); PREFIX_MATCH(Texture2D, D3D_SVT_TEXTURE2D); PREFIX_MATCH(RWTexture2D, D3D_SVT_RWTEXTURE2D); PREFIX_MATCH(Texture2DArray, D3D_SVT_TEXTURE2DARRAY); PREFIX_MATCH(RWTexture2DArray, D3D_SVT_RWTEXTURE2DARRAY); PREFIX_MATCH(Texture3D, D3D_SVT_TEXTURE3D); PREFIX_MATCH(RWTexture3D, D3D_SVT_RWTEXTURE3D); PREFIX_MATCH(TextureCube, D3D_SVT_TEXTURECUBE); PREFIX_MATCH(TextureCubeArray, D3D_SVT_TEXTURECUBEARRAY); PREFIX_MATCH(Texture2DMS, D3D_SVT_TEXTURE2DMS); PREFIX_MATCH(Texture2DMSArray, D3D_SVT_TEXTURE2DMSARRAY); #undef EXACT_MATCH #undef PREFIX_MATCH // Default: not an object type return false; } // Helper to determine if an LLVM type represents an HLSL // object type (uses the `TryToDetectObjectType()` function // defined previously). static bool IsObjectType(llvm::Type *inType) { llvm::Type *type = inType; while (type->isArrayTy()) { type = type->getArrayElementType(); } llvm::StructType *structType = dyn_cast<StructType>(type); if (!structType) return false; D3D_SHADER_VARIABLE_TYPE ignored; return TryToDetectObjectType(structType, &ignored); } HRESULT CShaderReflectionType::InitializeEmpty() { ZeroMemory(&m_Desc, sizeof(m_Desc)); return S_OK; } // Returns true if type is array and/or vec with matching number of elements. static bool MatchVectorOrMatrixType(llvm::Type *type, unsigned count, int maxDepth) { if (type->isArrayTy()) { unsigned arraySize = (unsigned)type->getArrayNumElements(); if (maxDepth < 1 || count < arraySize || (count % arraySize) != 0) return false; return MatchVectorOrMatrixType(type->getArrayElementType(), count / arraySize, maxDepth - 1); } else if (type->isVectorTy()) { if (maxDepth < 1) return false; return type->getVectorNumElements() == count; } return count == 1; } // Main logic for translating an LLVM type and associated // annotations into a D3D shader reflection type. HRESULT CShaderReflectionType::Initialize( DxilModule &M, llvm::Type *inType, DxilFieldAnnotation &typeAnnotation, unsigned int baseOffset, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool isCBuffer) { DXASSERT_NOMSG(inType); // Set a bunch of fields to default values, to avoid duplication. m_Desc.Class = D3D_SVC_SCALAR; m_Desc.Rows = 0; m_Desc.Columns = 0; m_Desc.Elements = 0; m_Desc.Members = 0; m_SizeInCBuffer = 0; // Used for calculating size later unsigned cbRows = 1; unsigned cbCols = 1; unsigned cbCompSize = 4; // or 8 for 64-bit types. unsigned cbRowStride = 16; // or 32 if 64-bit and cols > 2. if (isCBuffer) { // Extract offset relative to parent. // Note: the `baseOffset` is used in the case where the type in // question is a field in a constant buffer, since then both the // field and the variable store the same offset information, and // we need to zero out the value in the type to avoid the user // of the reflection interface seeing 2x the correct value. 
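// (Illustrative: a field annotated at cbuffer offset 48 inside a struct variable that itself starts at offset 32 is reported here with Offset == 16, i.e. relative to its parent.) 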
m_Desc.Offset = typeAnnotation.GetCBufferOffset() - baseOffset; } else { m_Desc.Offset = baseOffset; } // Arrays don't seem to be represented directly in the reflection // data, but only as the `Elements` field being non-zero. // We "unwrap" any array type here, and then proceed to look // at the element type. llvm::Type *type = inType; // Arrays can be a bit difficult, since some types are translated to arrays. // Additionally, matrices have multiple potential forms, so we must pay // attention to the field annotation to determine when we have reached the // element type that may be a matrix or a vector. // There are several possible matrix encodings: // High level: struct { [rows x <cols x float>] } // High level struct stripped: [rows x <cols x float>] // High level struct stripped, one row: <cols x float> // Vector as array: [rows x [cols x float]] // Vector as array, one row: [cols x float] // Flattened vector: <(rows*cols) x float> // Flattened vector as array: [(rows*cols) x float] // And vector may use llvm vector, or be translated to array: // <cols x float> <-> [cols x float] // Use type annotation to determine if we have a vector or matrix first, // so we can stop multiplying in array dims at the right time. if (typeAnnotation.HasMatrixAnnotation()) { // We can extract the details from the annotation. DxilMatrixAnnotation const &matrixAnnotation = typeAnnotation.GetMatrixAnnotation(); switch (matrixAnnotation.Orientation) { default: #ifndef NDEBUG OutputDebugStringA( "DxilContainerReflection.cpp: error: unknown matrix orientation\n"); #endif // Note: column-major layout is the default LLVM_FALLTHROUGH; // HLSL Change case hlsl::MatrixOrientation::Undefined: case hlsl::MatrixOrientation::ColumnMajor: m_Desc.Class = D3D_SVC_MATRIX_COLUMNS; break; case hlsl::MatrixOrientation::RowMajor: m_Desc.Class = D3D_SVC_MATRIX_ROWS; break; } m_Desc.Rows = matrixAnnotation.Rows; m_Desc.Columns = matrixAnnotation.Cols; cbRows = m_Desc.Rows; cbCols = m_Desc.Columns; if (m_Desc.Class == D3D_SVC_MATRIX_COLUMNS) { std::swap(cbRows, cbCols); } } else if (unsigned cols = typeAnnotation.GetVectorSize()) { // Older format lacks this size, but the type will be a vector, // so that will be handled later by the original code path. m_Desc.Class = D3D_SVC_VECTOR; m_Desc.Rows = 1; m_Desc.Columns = cols; cbRows = m_Desc.Rows; cbCols = m_Desc.Columns; } while (type->isArrayTy()) { // Already determined that this is a vector or matrix, so break if the // number of remaining array and/or vector elements matches. if (m_Desc.Class != D3D_SVC_SCALAR) { // max depth is 1 for vector, and 2 for matrix, unless rows in storage // orientation is 1. if (MatchVectorOrMatrixType( type, cbRows * cbCols, (m_Desc.Class == D3D_SVC_VECTOR || cbRows == 1) ? 1 : 2)) break; } // Non-array types should have `Elements` be zero, so as soon as we // find that we have our first real array (not a matrix), we initialize // `Elements` if (!m_Desc.Elements) m_Desc.Elements = 1; // It isn't clear what the desired behavior for multi-dimensional arrays is, // but for now we do the expedient thing of multiplying out all their // dimensions. m_Desc.Elements *= type->getArrayNumElements(); type = type->getArrayElementType(); } // Look at the annotation to try to determine the basic type of value. // // Note that DXIL supports some types that don't currently have equivalents // in the reflection interface, so we try to muddle through here. 
bool bMinPrec = M.GetUseMinPrecision(); D3D_SHADER_VARIABLE_TYPE componentType = D3D_SVT_VOID; switch (typeAnnotation.GetCompType().GetKind()) { case hlsl::DXIL::ComponentType::Invalid: break; case hlsl::DXIL::ComponentType::I1: componentType = D3D_SVT_BOOL; m_Name = "bool"; break; case hlsl::DXIL::ComponentType::I16: if (bMinPrec) { componentType = D3D_SVT_MIN16INT; m_Name = "min16int"; } else { componentType = D3D_SVT_INT16; m_Name = "int16_t"; cbCompSize = 2; } break; case hlsl::DXIL::ComponentType::U16: if (bMinPrec) { componentType = D3D_SVT_MIN16UINT; m_Name = "min16uint"; } else { componentType = D3D_SVT_UINT16; m_Name = "uint16_t"; cbCompSize = 2; } break; case hlsl::DXIL::ComponentType::I64: componentType = D3D_SVT_INT64; m_Name = "int64_t"; cbCompSize = 8; break; case hlsl::DXIL::ComponentType::I32: componentType = D3D_SVT_INT; m_Name = "int"; break; case hlsl::DXIL::ComponentType::U64: componentType = D3D_SVT_UINT64; m_Name = "uint64_t"; cbCompSize = 8; break; case hlsl::DXIL::ComponentType::U32: componentType = D3D_SVT_UINT; m_Name = "uint"; break; case hlsl::DXIL::ComponentType::F16: case hlsl::DXIL::ComponentType::SNormF16: case hlsl::DXIL::ComponentType::UNormF16: if (bMinPrec) { componentType = D3D_SVT_MIN16FLOAT; m_Name = "min16float"; } else { componentType = D3D_SVT_FLOAT16; m_Name = "float16_t"; cbCompSize = 2; } break; case hlsl::DXIL::ComponentType::F32: case hlsl::DXIL::ComponentType::SNormF32: case hlsl::DXIL::ComponentType::UNormF32: componentType = D3D_SVT_FLOAT; m_Name = "float"; break; case hlsl::DXIL::ComponentType::F64: case hlsl::DXIL::ComponentType::SNormF64: case hlsl::DXIL::ComponentType::UNormF64: cbCompSize = 8; componentType = D3D_SVT_DOUBLE; m_Name = "double"; break; default: #ifndef NDEBUG OutputDebugStringA( "DxilContainerReflection.cpp: error: unknown component type\n"); #endif break; } m_Desc.Type = componentType; if (m_Desc.Class != D3D_SVC_SCALAR) { // matrix or explicit vector already handled, except for name. if (m_Desc.Class == D3D_SVC_VECTOR) { m_Name += std::to_string(m_Desc.Columns); } else { m_Name += std::to_string(m_Desc.Rows) + "x" + std::to_string(m_Desc.Columns); } } else if (FixedVectorType *VT = dyn_cast<FixedVectorType>(type)) { // We assume that LLVM vectors either represent matrices (handled above) // or HLSL vectors. // // Note: the reflection interface encodes an N-vector as if it had 1 row // and N columns. m_Desc.Class = D3D_SVC_VECTOR; m_Desc.Rows = 1; m_Desc.Columns = VT->getNumElements(); m_Name += std::to_string(VT->getNumElements()); cbRows = m_Desc.Rows; cbCols = m_Desc.Columns; } else if (type->isStructTy()) { // A struct type might be an ordinary user-defined `struct`, // or one of the built-in HLSL "object" types. StructType *structType = cast<StructType>(type); const StructLayout *structLayout = isCBuffer ? nullptr : M.GetModule()->getDataLayout().getStructLayout(structType); // We use our function to try to detect an object type // based on its name. if (TryToDetectObjectType(structType, &m_Desc.Type)) { m_Desc.Class = D3D_SVC_OBJECT; } else { // Otherwise we have a struct and need to recurse on its fields. 
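// (Illustrative: for struct { float2 a; float b[3]; } the columnCounter logic below yields Columns == 2*1*1 + 1*1*3 == 5.) 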
m_Desc.Class = D3D_SVC_STRUCT; m_Desc.Rows = 1; // Try to "clean" the type name for use in reflection data llvm::StringRef name = structType->getName(); name = name.ltrim("dx.alignment.legacy."); // legacy prefix for legacy types name = name.ltrim(kHostLayoutTypePrefix); name = name.ltrim("struct."); m_Name = name; // Fields may have annotations, and we need to look at these // in order to decode their types properly. DxilTypeSystem &typeSys = M.GetTypeSystem(); DxilStructAnnotation *structAnnotation = typeSys.GetStructAnnotation(structType); // There is no annotation for empty structs unsigned int fieldCount = 0; if (structAnnotation && !structAnnotation->IsEmptyBesidesResources()) fieldCount = type->getStructNumElements(); // The DXBC reflection info computes `Columns` for a // `struct` type from the fields (see below) UINT columnCounter = 0; CShaderReflectionType *fieldReflectionType = nullptr; for (unsigned int ff = 0; ff < fieldCount; ++ff) { DxilFieldAnnotation &fieldAnnotation = structAnnotation->GetFieldAnnotation(ff); llvm::Type *fieldType = structType->getStructElementType(ff); // Skip fields with object types, since these are not part of constant // buffers, and are not allowed in other buffer types. if (IsObjectType(fieldType)) { continue; } fieldReflectionType = new CShaderReflectionType(); allTypes.push_back( std::unique_ptr<CShaderReflectionType>(fieldReflectionType)); unsigned int elementOffset = structLayout ? (unsigned int)structLayout->getElementOffset(ff) : 0; fieldReflectionType->Initialize(M, fieldType, fieldAnnotation, elementOffset, allTypes, isCBuffer); // Treat bit fields as members inside the integer. if (fieldAnnotation.HasBitFields()) fieldReflectionType->m_Desc.Members = fieldAnnotation.GetBitFields().size(); m_MemberTypes.push_back(fieldReflectionType); m_MemberNames.push_back(fieldAnnotation.GetFieldName().c_str()); // Skip struct fields with no real contents, otherwise we expand // the size of this struct by 1 when we treat a zero column size as 1. if (isa<StructType>(fieldType) && fieldReflectionType->m_Desc.Columns == 0) { continue; } // Effectively, we want to add one to `Columns` for every scalar // nested recursively inside this `struct` type (ignoring objects, // which we filtered above). We should be able to compute this as the // product of the `Columns`, `Rows` and `Elements` of each field, with // the caveat that some of these may be zero, but should be treated as // one. columnCounter += (fieldReflectionType->m_Desc.Columns ? fieldReflectionType->m_Desc.Columns : 1) * (fieldReflectionType->m_Desc.Rows ? fieldReflectionType->m_Desc.Rows : 1) * (fieldReflectionType->m_Desc.Elements ? fieldReflectionType->m_Desc.Elements : 1); if (fieldAnnotation.HasBitFields()) { unsigned bitOffset = 0; CShaderReflectionType *bitFieldReflectionType = nullptr; for (auto &bitfieldAnnotation : fieldAnnotation.GetBitFields()) { bitFieldReflectionType = new CShaderReflectionType(); allTypes.push_back( std::unique_ptr<CShaderReflectionType>(bitFieldReflectionType)); bitFieldReflectionType->Initialize(M, fieldType, fieldAnnotation, elementOffset, allTypes, isCBuffer); bitFieldReflectionType->m_Desc.Class = D3D_SVC_BIT_FIELD; // Save bit size to columns. bitFieldReflectionType->m_Desc.Columns = bitfieldAnnotation.GetBitFieldWidth(); // Save bit offset to Offset. 
bitFieldReflectionType->m_Desc.Offset = bitOffset; bitOffset += bitfieldAnnotation.GetBitFieldWidth(); fieldReflectionType->m_MemberTypes.push_back( bitFieldReflectionType); fieldReflectionType->m_MemberNames.push_back( bitfieldAnnotation.GetFieldName().c_str()); } } } m_Desc.Columns = columnCounter; if (fieldReflectionType) { // Set our size based on the last field's offset + size: m_SizeInCBuffer = fieldReflectionType->m_Desc.Offset + fieldReflectionType->m_SizeInCBuffer; if (m_Desc.Elements > 1) { unsigned alignedSize = ((m_SizeInCBuffer + 15) & ~0xF); m_SizeInCBuffer += (m_Desc.Elements - 1) * alignedSize; } } // Because we might have skipped fields during enumeration, // the `Members` count in the description might not be the same // as the field count of the original LLVM type. m_Desc.Members = m_MemberTypes.size(); } } else if (type->isPointerTy()) { #ifndef NDEBUG OutputDebugStringA( "DxilContainerReflection.cpp: error: cannot reflect pointer type\n"); #endif } else if (type->isVoidTy()) { // Name for `void` wasn't handled in the component-type `switch` above m_Name = "void"; m_Desc.Class = D3D_SVC_SCALAR; m_Desc.Rows = 1; m_Desc.Columns = 1; } else { // Assume we have a scalar at this point. m_Desc.Class = D3D_SVC_SCALAR; m_Desc.Rows = 1; m_Desc.Columns = 1; // Special-case naming switch (m_Desc.Type) { default: break; case D3D_SVT_UINT: // Scalar `uint` gets reflected as `dword`, while vectors/matrices use // `uint`... m_Name = "dword"; break; } cbRows = 1; cbCols = 1; } // TODO: are there other cases to be handled? // Compute our cbuffer size for member reflection switch (m_Desc.Class) { case D3D_SVC_SCALAR: case D3D_SVC_MATRIX_COLUMNS: case D3D_SVC_MATRIX_ROWS: case D3D_SVC_VECTOR: if (m_Desc.Elements > 1) cbRows = cbRows * m_Desc.Elements; if (cbCompSize > 4 && cbCols > 2) cbRowStride = 32; m_SizeInCBuffer = cbRowStride * (cbRows - 1) + cbCompSize * cbCols; break; } m_Desc.Name = m_Name.c_str(); return S_OK; } void CShaderReflectionConstantBuffer::Initialize( DxilModule &M, DxilCBuffer &CB, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool bUsageInMetadata) { ZeroMemory(&m_Desc, sizeof(m_Desc)); m_ReflectionName = CB.GetGlobalName(); m_Desc.Name = m_ReflectionName.c_str(); m_Desc.Size = CB.GetSize(); m_Desc.Size = (m_Desc.Size + 0x0f) & ~(0x0f); // Round up to 16 bytes for reflection. m_Desc.Type = D3D_CT_CBUFFER; m_Desc.uFlags = 0; // For ConstantBuffer<> buf[2], the array size is in the Resource binding // count part. Type *Ty = dxilutil::StripArrayTypes(CB.GetHLSLType()->getPointerElementType()); DxilTypeSystem &typeSys = M.GetTypeSystem(); StructType *ST = cast<StructType>(Ty); DxilStructAnnotation *annotation = typeSys.GetStructAnnotation(cast<StructType>(ST)); // Dxil from dxbc doesn't have annotation. if (!annotation) return; m_Desc.Variables = ST->getNumContainedTypes(); if (CB.GetRangeSize() > 1) { DXASSERT(m_Desc.Variables == 1, "otherwise, assumption is wrong"); } // If only one member, it's used if it's here. bool bAllUsed = ST->getNumContainedTypes() < 2; bAllUsed |= !bUsageInMetadata; // Will update in SetCBufferUsage. for (unsigned i = 0; i < ST->getNumContainedTypes(); ++i) { DxilFieldAnnotation &fieldAnnotation = annotation->GetFieldAnnotation(i); D3D12_SHADER_VARIABLE_DESC VarDesc; ZeroMemory(&VarDesc, sizeof(VarDesc)); VarDesc.uFlags = (bAllUsed || fieldAnnotation.IsCBVarUsed()) ? D3D_SVF_USED : 0; CShaderReflectionVariable Var; // Create reflection type. 
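// (Illustrative: VarDesc.Size below comes from the cbRowStride formula in Initialize above; e.g. a column-major float3x3 reports 16 * (3 - 1) + 4 * 3 == 44 bytes.) 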
CShaderReflectionType *pVarType = new CShaderReflectionType(); allTypes.push_back(std::unique_ptr<CShaderReflectionType>(pVarType)); pVarType->Initialize(M, ST->getContainedType(i), fieldAnnotation, fieldAnnotation.GetCBufferOffset(), allTypes, true); // Replicate fxc bug, where Elements == 1 for inner struct of CB array, // instead of 0. if (CB.GetRangeSize() > 1) { DXASSERT(pVarType->m_Desc.Elements == 0, "otherwise, assumption is wrong"); pVarType->m_Desc.Elements = 1; } else if (CB.GetHLSLType()->getPointerElementType()->isArrayTy() && CB.GetRangeSize() == 1) { // Set elements to 1 for size 1 array. pVarType->m_Desc.Elements = 1; } BYTE *pDefaultValue = nullptr; VarDesc.Name = fieldAnnotation.GetFieldName().c_str(); VarDesc.StartOffset = fieldAnnotation.GetCBufferOffset(); VarDesc.Size = pVarType->GetCBufferSize(); Var.Initialize(this, &VarDesc, pVarType, pDefaultValue); m_Variables.push_back(Var); } } static unsigned CalcResTypeSize(DxilModule &M, DxilResource &R) { Type *Ty = R.GetHLSLType()->getPointerElementType(); if (R.IsStructuredBuffer()) { Ty = dxilutil::StripArrayTypes(Ty); } return M.GetModule()->getDataLayout().getTypeAllocSize(Ty); } void CShaderReflectionConstantBuffer::InitializeStructuredBuffer( DxilModule &M, DxilResource &R, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes) { ZeroMemory(&m_Desc, sizeof(m_Desc)); m_ReflectionName = R.GetGlobalName(); m_Desc.Type = D3D11_CT_RESOURCE_BIND_INFO; m_Desc.uFlags = 0; m_Desc.Variables = 1; D3D12_SHADER_VARIABLE_DESC VarDesc; ZeroMemory(&VarDesc, sizeof(VarDesc)); VarDesc.Name = "$Element"; VarDesc.Size = CalcResTypeSize(M, R); VarDesc.StartTexture = UINT_MAX; VarDesc.StartSampler = UINT_MAX; VarDesc.uFlags |= D3D_SVF_USED; CShaderReflectionVariable Var; // First type is an empty type: returned if no annotation available. CShaderReflectionType *pVarType = allTypes[0].get(); // Create reflection type, if we have the necessary annotation info // Extract the `struct` that wraps element type of the buffer resource Type *Ty = R.GetHLSLType()->getPointerElementType(); SmallVector<unsigned, 4> arrayDims; Ty = dxilutil::StripArrayTypes(Ty, &arrayDims); for (unsigned i = 0; i < arrayDims.size(); ++i) { m_ReflectionName += "[0]"; } m_Desc.Name = m_ReflectionName.c_str(); StructType *ST = cast<StructType>(Ty); // Look up struct type annotation on the element type DxilTypeSystem &typeSys = M.GetTypeSystem(); DxilStructAnnotation *annotation = typeSys.GetStructAnnotation(cast<StructType>(ST)); // Dxil from dxbc doesn't have annotation. if (annotation) { // Actually create the reflection type. 
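// (Illustrative: for StructuredBuffer<S> buf[4], m_ReflectionName above became "buf[0]" and the single exposed variable is "$Element", typed as S by the initialization below.) 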
pVarType = new CShaderReflectionType(); allTypes.push_back(std::unique_ptr<CShaderReflectionType>(pVarType)); // The user-visible element type is the first field of the wrapper `struct` Type *fieldType = ST->getElementType(0); DxilFieldAnnotation &fieldAnnotation = annotation->GetFieldAnnotation(0); pVarType->Initialize(M, fieldType, fieldAnnotation, 0, allTypes, false); } BYTE *pDefaultValue = nullptr; Var.Initialize(this, &VarDesc, pVarType, pDefaultValue); m_Variables.push_back(Var); m_Desc.Size = VarDesc.Size; } void CShaderReflectionConstantBuffer::InitializeTBuffer( DxilModule &M, DxilResource &R, std::vector<std::unique_ptr<CShaderReflectionType>> &allTypes, bool bUsageInMetadata) { ZeroMemory(&m_Desc, sizeof(m_Desc)); m_ReflectionName = R.GetGlobalName(); m_Desc.Type = D3D11_CT_TBUFFER; m_Desc.uFlags = 0; Type *Ty = R.GetHLSLType()->getPointerElementType(); DxilTypeSystem &typeSys = M.GetTypeSystem(); StructType *ST = cast<StructType>(Ty); DxilStructAnnotation *annotation = typeSys.GetStructAnnotation(cast<StructType>(ST)); // Dxil from dxbc doesn't have annotation. if (!annotation) return; m_Desc.Name = m_ReflectionName.c_str(); m_Desc.Variables = ST->getNumContainedTypes(); // If only one member, it's used if it's here. bool bAllUsed = ST->getNumContainedTypes() < 2; bAllUsed |= !bUsageInMetadata; // Will update in SetCBufferUsage. for (unsigned i = 0; i < ST->getNumContainedTypes(); ++i) { DxilFieldAnnotation &fieldAnnotation = annotation->GetFieldAnnotation(i); D3D12_SHADER_VARIABLE_DESC VarDesc; ZeroMemory(&VarDesc, sizeof(VarDesc)); VarDesc.uFlags = (bAllUsed || fieldAnnotation.IsCBVarUsed()) ? D3D_SVF_USED : 0; CShaderReflectionVariable Var; // Create reflection type. CShaderReflectionType *pVarType = new CShaderReflectionType(); allTypes.push_back(std::unique_ptr<CShaderReflectionType>(pVarType)); pVarType->Initialize(M, ST->getContainedType(i), fieldAnnotation, fieldAnnotation.GetCBufferOffset(), allTypes, true); BYTE *pDefaultValue = nullptr; VarDesc.Name = fieldAnnotation.GetFieldName().c_str(); VarDesc.StartOffset = fieldAnnotation.GetCBufferOffset(); VarDesc.Size = pVarType->GetCBufferSize(); VarDesc.StartTexture = UINT_MAX; VarDesc.StartSampler = UINT_MAX; Var.Initialize(this, &VarDesc, pVarType, pDefaultValue); m_Variables.push_back(Var); m_Desc.Size = std::max(m_Desc.Size, VarDesc.StartOffset + VarDesc.Size); } m_Desc.Size = (m_Desc.Size + 0x0f) & ~(0x0f); // Round up to 16 bytes for reflection. } HRESULT CShaderReflectionConstantBuffer::GetDesc(D3D12_SHADER_BUFFER_DESC *pDesc) { if (!pDesc) return E_POINTER; memcpy(pDesc, &m_Desc, sizeof(m_Desc)); return S_OK; } ID3D12ShaderReflectionVariable * CShaderReflectionConstantBuffer::GetVariableByIndex(UINT Index) { if (Index >= m_Variables.size()) { return &g_InvalidSRVariable; } return &m_Variables[Index]; } ID3D12ShaderReflectionVariable * CShaderReflectionConstantBuffer::GetVariableByName(LPCSTR Name) { UINT index; if (NULL == Name) { return &g_InvalidSRVariable; } for (index = 0; index < m_Variables.size(); ++index) { if (0 == strcmp(m_Variables[index].GetName(), Name)) { return &m_Variables[index]; } } return &g_InvalidSRVariable; } /////////////////////////////////////////////////////////////////////////////// // DxilShaderReflection implementation.
// static DxilResource *DxilResourceFromBase(DxilResourceBase *RB) { DxilResourceBase::Class C = RB->GetClass(); if (C == DXIL::ResourceClass::UAV || C == DXIL::ResourceClass::SRV) return (DxilResource *)RB; return nullptr; } static D3D_SHADER_INPUT_TYPE ResourceToShaderInputType(DxilResourceBase *RB) { DxilResource *R = DxilResourceFromBase(RB); bool isUAV = RB->GetClass() == DxilResourceBase::Class::UAV; switch (RB->GetKind()) { case DxilResource::Kind::CBuffer: return D3D_SIT_CBUFFER; case DxilResource::Kind::Sampler: return D3D_SIT_SAMPLER; case DxilResource::Kind::RawBuffer: return isUAV ? D3D_SIT_UAV_RWBYTEADDRESS : D3D_SIT_BYTEADDRESS; case DxilResource::Kind::StructuredBuffer: { if (!isUAV) return D3D_SIT_STRUCTURED; // TODO: D3D_SIT_UAV_CONSUME_STRUCTURED, D3D_SIT_UAV_APPEND_STRUCTURED? if (R->HasCounter()) return D3D_SIT_UAV_RWSTRUCTURED_WITH_COUNTER; return D3D_SIT_UAV_RWSTRUCTURED; } case DxilResource::Kind::TBuffer: return D3D_SIT_TBUFFER; case DxilResource::Kind::TypedBuffer: case DxilResource::Kind::Texture1D: case DxilResource::Kind::Texture1DArray: case DxilResource::Kind::Texture2D: case DxilResource::Kind::Texture2DArray: case DxilResource::Kind::Texture2DMS: case DxilResource::Kind::Texture2DMSArray: case DxilResource::Kind::Texture3D: case DxilResource::Kind::TextureCube: case DxilResource::Kind::TextureCubeArray: return isUAV ? D3D_SIT_UAV_RWTYPED : D3D_SIT_TEXTURE; case DxilResource::Kind::RTAccelerationStructure: return (D3D_SHADER_INPUT_TYPE)(D3D_SIT_UAV_RWSTRUCTURED_WITH_COUNTER + 1); // D3D_SIT_RTACCELERATIONSTRUCTURE case DxilResource::Kind::FeedbackTexture2D: case DxilResource::Kind::FeedbackTexture2DArray: return (D3D_SHADER_INPUT_TYPE)(D3D_SIT_UAV_RWSTRUCTURED_WITH_COUNTER + 2); // D3D_SIT_UAV_FEEDBACKTEXTURE default: return (D3D_SHADER_INPUT_TYPE)-1; } } static D3D_RESOURCE_RETURN_TYPE ResourceToReturnType(DxilResourceBase *RB) { DxilResource *R = DxilResourceFromBase(RB); if (R != nullptr && !R->IsTBuffer()) { CompType CT = R->GetCompType(); if (CT.GetKind() == CompType::Kind::F64) return D3D_RETURN_TYPE_DOUBLE; if (CT.IsUNorm()) return D3D_RETURN_TYPE_UNORM; if (CT.IsSNorm()) return D3D_RETURN_TYPE_SNORM; if (CT.IsSIntTy()) return D3D_RETURN_TYPE_SINT; if (CT.IsUIntTy()) return D3D_RETURN_TYPE_UINT; if (CT.IsFloatTy()) return D3D_RETURN_TYPE_FLOAT; // D3D_RETURN_TYPE_CONTINUED: Return type is a multiple-dword type, such as // a double or uint64, and the component is continued from the previous // component that was declared. The first component represents the lower // bits. 
return D3D_RETURN_TYPE_MIXED; } return (D3D_RESOURCE_RETURN_TYPE)0; } static D3D_SRV_DIMENSION ResourceToDimension(DxilResourceBase *RB) { switch (RB->GetKind()) { case DxilResource::Kind::StructuredBuffer: case DxilResource::Kind::TypedBuffer: return D3D_SRV_DIMENSION_BUFFER; case DxilResource::Kind::TBuffer: return D3D_SRV_DIMENSION_UNKNOWN; // Fxc returns this case DxilResource::Kind::Texture1D: return D3D_SRV_DIMENSION_TEXTURE1D; case DxilResource::Kind::Texture1DArray: return D3D_SRV_DIMENSION_TEXTURE1DARRAY; case DxilResource::Kind::Texture2D: case DxilResource::Kind::FeedbackTexture2D: return D3D_SRV_DIMENSION_TEXTURE2D; case DxilResource::Kind::Texture2DArray: case DxilResource::Kind::FeedbackTexture2DArray: return D3D_SRV_DIMENSION_TEXTURE2DARRAY; case DxilResource::Kind::Texture2DMS: return D3D_SRV_DIMENSION_TEXTURE2DMS; case DxilResource::Kind::Texture2DMSArray: return D3D_SRV_DIMENSION_TEXTURE2DMSARRAY; case DxilResource::Kind::Texture3D: return D3D_SRV_DIMENSION_TEXTURE3D; case DxilResource::Kind::TextureCube: return D3D_SRV_DIMENSION_TEXTURECUBE; case DxilResource::Kind::TextureCubeArray: return D3D_SRV_DIMENSION_TEXTURECUBEARRAY; case DxilResource::Kind::RawBuffer: return D3D11_SRV_DIMENSION_BUFFER; // D3D11_SRV_DIMENSION_BUFFEREX? default: return D3D_SRV_DIMENSION_UNKNOWN; } } static UINT ResourceToFlags(DxilResourceBase *RB) { if (RB->GetClass() == DXIL::ResourceClass::CBuffer) return D3D_SIF_USERPACKED; UINT result = 0; DxilResource *R = DxilResourceFromBase(RB); if (R != nullptr && (R->IsAnyTexture() || R->GetKind() == DXIL::ResourceKind::TypedBuffer)) { llvm::Type *RetTy = R->GetRetType(); if (VectorType *VT = dyn_cast<VectorType>(RetTy)) { unsigned vecSize = VT->getNumElements(); switch (vecSize) { case 4: result |= D3D_SIF_TEXTURE_COMPONENTS; break; case 3: result |= D3D_SIF_TEXTURE_COMPONENT_1; break; case 2: result |= D3D_SIF_TEXTURE_COMPONENT_0; break; } } } else if (R && R->IsTBuffer()) { return D3D_SIF_USERPACKED; } else if (RB->GetClass() == DXIL::ResourceClass::Sampler) { DxilSampler *S = static_cast<DxilSampler *>(RB); if (S->GetSamplerKind() == DXIL::SamplerKind::Comparison) result |= D3D_SIF_COMPARISON_SAMPLER; } return result; } void DxilModuleReflection::CreateReflectionObjectForResource( DxilResourceBase *RB) { DxilResourceBase::Class C = RB->GetClass(); DxilResource *R = (C == DXIL::ResourceClass::UAV || C == DXIL::ResourceClass::SRV) ? (DxilResource *)RB : nullptr; D3D12_SHADER_INPUT_BIND_DESC inputBind; ZeroMemory(&inputBind, sizeof(inputBind)); inputBind.BindCount = RB->GetRangeSize(); // FXC Bug: For Unbounded range, CBuffers say bind count is UINT_MAX, but all // others report 0! 
if (RB->GetRangeSize() == UINT_MAX && C != DXIL::ResourceClass::CBuffer) inputBind.BindCount = 0; inputBind.BindPoint = RB->GetLowerBound(); inputBind.Dimension = ResourceToDimension(RB); inputBind.Name = RB->GetGlobalName().c_str(); inputBind.Type = ResourceToShaderInputType(RB); if (R == nullptr) { inputBind.NumSamples = 0; } else { inputBind.NumSamples = R->GetSampleCount(); if (inputBind.NumSamples == 0) { if (R->IsStructuredBuffer()) { inputBind.NumSamples = CalcResTypeSize(*m_pDxilModule, *R); } else if (!R->IsRawBuffer() && !R->IsTBuffer() && R->GetKind() != DXIL::ResourceKind::Texture2DMS && R->GetKind() != DXIL::ResourceKind::Texture2DMSArray) { inputBind.NumSamples = 0xFFFFFFFF; } } } inputBind.ReturnType = ResourceToReturnType(RB); inputBind.Space = RB->GetSpaceID(); inputBind.uFlags = ResourceToFlags(RB); inputBind.uID = RB->GetID(); m_Resources.push_back(inputBind); } // Find the imm offset part from a value. // It must exist unless offset is 0. static unsigned GetCBOffset(Value *V) { if (ConstantInt *Imm = dyn_cast<ConstantInt>(V)) return Imm->getLimitedValue(); else if (isa<UnaryInstruction>(V)) { return 0; } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) { switch (BO->getOpcode()) { case Instruction::Add: { unsigned left = GetCBOffset(BO->getOperand(0)); unsigned right = GetCBOffset(BO->getOperand(1)); return left + right; } break; case Instruction::Or: { unsigned left = GetCBOffset(BO->getOperand(0)); unsigned right = GetCBOffset(BO->getOperand(1)); return left | right; } break; default: return 0; } } else { return 0; } } static unsigned GetOffsetForCBExtractValue(ExtractValueInst *EV, bool bMinPrecision) { DXASSERT(EV->getNumIndices() == 1, "otherwise, unexpected indices/type for extractvalue"); unsigned typeSize = 4; unsigned bits = EV->getType()->getScalarSizeInBits(); if (bits == 64) typeSize = 8; else if (bits == 16 && !bMinPrecision) typeSize = 2; return (EV->getIndices().front() * typeSize); } static void CollectInPhiChain(PHINode *cbUser, std::vector<unsigned> &cbufUsage, unsigned offset, std::unordered_set<Value *> &userSet, bool bMinPrecision) { if (userSet.count(cbUser) > 0) return; userSet.insert(cbUser); for (User *cbU : cbUser->users()) { if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(cbU)) { cbufUsage.emplace_back(offset + GetOffsetForCBExtractValue(EV, bMinPrecision)); } else { PHINode *phi = cast<PHINode>(cbU); CollectInPhiChain(phi, cbufUsage, offset, userSet, bMinPrecision); } } } static void CollectCBufUsage(Value *cbHandle, std::vector<unsigned> &cbufUsage, bool bMinPrecision) { for (User *U : cbHandle->users()) { CallInst *CI = cast<CallInst>(U); ConstantInt *opcodeV = cast<ConstantInt>(CI->getArgOperand(DXIL::OperandIndex::kOpcodeIdx)); DXIL::OpCode opcode = static_cast<DXIL::OpCode>(opcodeV->getLimitedValue()); if (opcode == DXIL::OpCode::CBufferLoadLegacy) { DxilInst_CBufferLoadLegacy cbload(CI); Value *resIndex = cbload.get_regIndex(); unsigned offset = GetCBOffset(resIndex); // 16 bytes align. 
offset <<= 4; for (User *cbU : U->users()) { if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(cbU)) { cbufUsage.emplace_back(offset + GetOffsetForCBExtractValue(EV, bMinPrecision)); } else { PHINode *phi = cast<PHINode>(cbU); std::unordered_set<Value *> userSet; CollectInPhiChain(phi, cbufUsage, offset, userSet, bMinPrecision); } } } else if (opcode == DXIL::OpCode::CBufferLoad) { DxilInst_CBufferLoad cbload(CI); Value *byteOffset = cbload.get_byteOffset(); unsigned offset = GetCBOffset(byteOffset); cbufUsage.emplace_back(offset); } else if (opcode == DXIL::OpCode::AnnotateHandle) { DxilInst_AnnotateHandle annotateHandle(CI); Value *annotatedHandle = annotateHandle.get_res(); CollectCBufUsage(annotatedHandle, cbufUsage, bMinPrecision); } else { // DXASSERT(0, "invalid opcode"); } } } static void SetCBufVarUsage(CShaderReflectionConstantBuffer &cb, std::vector<unsigned> &usage) { D3D12_SHADER_BUFFER_DESC Desc; if (FAILED(cb.GetDesc(&Desc))) return; unsigned size = Desc.Variables; std::sort(usage.begin(), usage.end()); for (unsigned i = 0; i < size; i++) { ID3D12ShaderReflectionVariable *pVar = cb.GetVariableByIndex(i); if (!pVar) continue; D3D12_SHADER_VARIABLE_DESC VarDesc; if (FAILED(pVar->GetDesc(&VarDesc))) continue; unsigned begin = VarDesc.StartOffset; unsigned end = begin + VarDesc.Size; auto beginIt = std::find_if(usage.begin(), usage.end(), [&](unsigned v) { return v >= begin; }); auto endIt = std::find_if(usage.begin(), usage.end(), [&](unsigned v) { return v >= end; }); bool used = beginIt != endIt; // Clear used. if (!used) { CShaderReflectionType *pVarType = (CShaderReflectionType *)pVar->GetType(); BYTE *pDefaultValue = nullptr; VarDesc.uFlags &= ~D3D_SVF_USED; CShaderReflectionVariable *pCVarDesc = (CShaderReflectionVariable *)pVar; pCVarDesc->Initialize(&cb, &VarDesc, pVarType, pDefaultValue); } } } void DxilShaderReflection::SetCBufferUsage() { hlsl::OP *hlslOP = m_pDxilModule->GetOP(); LLVMContext &Ctx = m_pDxilModule->GetCtx(); // Indexes >= cbuffer size from DxilModule are SRV or UAV structured buffers. // We only collect usage for actual cbuffers, so don't go clearing usage on // other buffers. unsigned cbSize = std::min(m_CBs.size(), m_pDxilModule->GetCBuffers().size()); std::vector<std::vector<unsigned>> cbufUsage(cbSize); Function *createHandle = hlslOP->GetOpFunc(DXIL::OpCode::CreateHandle, Type::getVoidTy(Ctx)); if (createHandle->user_empty()) { createHandle->eraseFromParent(); return; } // Find all cb handles. for (User *U : createHandle->users()) { DxilInst_CreateHandle handle(cast<CallInst>(U)); Value *resClass = handle.get_resourceClass(); ConstantInt *immResClass = cast<ConstantInt>(resClass); if (immResClass->getLimitedValue() == (unsigned)DXIL::ResourceClass::CBuffer) { ConstantInt *cbID = cast<ConstantInt>(handle.get_rangeId()); CollectCBufUsage(U, cbufUsage[cbID->getLimitedValue()], m_pDxilModule->GetUseMinPrecision()); } } for (unsigned i = 0; i < cbSize; i++) { SetCBufVarUsage(*m_CBs[i], cbufUsage[i]); } } void DxilModuleReflection::CreateReflectionObjects() { DXASSERT_NOMSG(m_pDxilModule != nullptr); { // Add empty type for when no type info is available, instead of returning // nullptr. DXASSERT_NOMSG(m_Types.empty()); CShaderReflectionType *pEmptyType = new CShaderReflectionType(); m_Types.push_back(std::unique_ptr<CShaderReflectionType>(pEmptyType)); pEmptyType->InitializeEmpty(); } // Create constant buffers, resources and signatures.
for (auto &&cb : m_pDxilModule->GetCBuffers()) { std::unique_ptr<CShaderReflectionConstantBuffer> rcb( new CShaderReflectionConstantBuffer()); rcb->Initialize(*m_pDxilModule, *(cb.get()), m_Types, m_bUsageInMetadata); m_CBsByName[rcb->GetName()] = (UINT)m_CBs.size(); m_CBs.emplace_back(std::move(rcb)); } // TODO: add tbuffers into m_CBs for (auto &&uav : m_pDxilModule->GetUAVs()) { if (!DXIL::IsStructuredBuffer(uav->GetKind())) { continue; } std::unique_ptr<CShaderReflectionConstantBuffer> rcb( new CShaderReflectionConstantBuffer()); rcb->InitializeStructuredBuffer(*m_pDxilModule, *(uav.get()), m_Types); m_StructuredBufferCBsByName[rcb->GetName()] = (UINT)m_CBs.size(); m_CBs.emplace_back(std::move(rcb)); } for (auto &&srv : m_pDxilModule->GetSRVs()) { if (srv->GetKind() != DxilResource::Kind::StructuredBuffer && srv->GetKind() != DxilResource::Kind::TBuffer) { continue; } std::unique_ptr<CShaderReflectionConstantBuffer> rcb( new CShaderReflectionConstantBuffer()); if (srv->GetKind() == DxilResource::Kind::TBuffer) { rcb->InitializeTBuffer(*m_pDxilModule, *(srv.get()), m_Types, m_bUsageInMetadata); m_CBsByName[rcb->GetName()] = (UINT)m_CBs.size(); } else { rcb->InitializeStructuredBuffer(*m_pDxilModule, *(srv.get()), m_Types); m_StructuredBufferCBsByName[rcb->GetName()] = (UINT)m_CBs.size(); } m_CBs.emplace_back(std::move(rcb)); } // Populate all resources. for (auto &&cbRes : m_pDxilModule->GetCBuffers()) { CreateReflectionObjectForResource(cbRes.get()); } for (auto &&samplerRes : m_pDxilModule->GetSamplers()) { CreateReflectionObjectForResource(samplerRes.get()); } for (auto &&srvRes : m_pDxilModule->GetSRVs()) { CreateReflectionObjectForResource(srvRes.get()); } for (auto &&uavRes : m_pDxilModule->GetUAVs()) { CreateReflectionObjectForResource(uavRes.get()); } } static D3D_REGISTER_COMPONENT_TYPE CompTypeToRegisterComponentType(CompType CT) { switch (CT.GetKind()) { case DXIL::ComponentType::F16: case DXIL::ComponentType::F32: return D3D_REGISTER_COMPONENT_FLOAT32; case DXIL::ComponentType::I1: case DXIL::ComponentType::U16: case DXIL::ComponentType::U32: return D3D_REGISTER_COMPONENT_UINT32; case DXIL::ComponentType::I16: case DXIL::ComponentType::I32: return D3D_REGISTER_COMPONENT_SINT32; default: return D3D_REGISTER_COMPONENT_UNKNOWN; } } static D3D_MIN_PRECISION CompTypeToMinPrecision(CompType CT) { switch (CT.GetKind()) { case DXIL::ComponentType::F16: return D3D_MIN_PRECISION_FLOAT_16; case DXIL::ComponentType::I16: return D3D_MIN_PRECISION_SINT_16; case DXIL::ComponentType::U16: return D3D_MIN_PRECISION_UINT_16; default: return D3D_MIN_PRECISION_DEFAULT; } } D3D_NAME SemanticToSystemValueType(const Semantic *S, DXIL::TessellatorDomain domain) { switch (S->GetKind()) { case Semantic::Kind::ClipDistance: return D3D_NAME_CLIP_DISTANCE; case Semantic::Kind::Arbitrary: return D3D_NAME_UNDEFINED; case Semantic::Kind::VertexID: return D3D_NAME_VERTEX_ID; case Semantic::Kind::InstanceID: return D3D_NAME_INSTANCE_ID; case Semantic::Kind::Position: return D3D_NAME_POSITION; case Semantic::Kind::Coverage: return D3D_NAME_COVERAGE; case Semantic::Kind::InnerCoverage: return D3D_NAME_INNER_COVERAGE; case Semantic::Kind::PrimitiveID: return D3D_NAME_PRIMITIVE_ID; case Semantic::Kind::SampleIndex: return D3D_NAME_SAMPLE_INDEX; case Semantic::Kind::IsFrontFace: return D3D_NAME_IS_FRONT_FACE; case Semantic::Kind::RenderTargetArrayIndex: return D3D_NAME_RENDER_TARGET_ARRAY_INDEX; case Semantic::Kind::ViewPortArrayIndex: return D3D_NAME_VIEWPORT_ARRAY_INDEX; case Semantic::Kind::CullDistance: return 
D3D_NAME_CULL_DISTANCE; case Semantic::Kind::Target: return D3D_NAME_TARGET; case Semantic::Kind::Depth: return D3D_NAME_DEPTH; case Semantic::Kind::DepthLessEqual: return D3D_NAME_DEPTH_LESS_EQUAL; case Semantic::Kind::DepthGreaterEqual: return D3D_NAME_DEPTH_GREATER_EQUAL; case Semantic::Kind::StencilRef: return D3D_NAME_STENCIL_REF; case Semantic::Kind::TessFactor: { switch (domain) { case DXIL::TessellatorDomain::IsoLine: return D3D_NAME_FINAL_LINE_DETAIL_TESSFACTOR; case DXIL::TessellatorDomain::Tri: return D3D_NAME_FINAL_TRI_EDGE_TESSFACTOR; case DXIL::TessellatorDomain::Quad: return D3D_NAME_FINAL_QUAD_EDGE_TESSFACTOR; default: return D3D_NAME_UNDEFINED; } } case Semantic::Kind::Barycentrics: return (D3D_NAME)DxilProgramSigSemantic::Barycentrics; case Semantic::Kind::ShadingRate: return (D3D_NAME)DxilProgramSigSemantic::ShadingRate; case Semantic::Kind::CullPrimitive: return (D3D_NAME)DxilProgramSigSemantic::CullPrimitive; case Semantic::Kind::InsideTessFactor: switch (domain) { case DXIL::TessellatorDomain::Tri: return D3D_NAME_FINAL_TRI_INSIDE_TESSFACTOR; case DXIL::TessellatorDomain::Quad: return D3D_NAME_FINAL_QUAD_INSIDE_TESSFACTOR; default: return D3D_NAME_UNDEFINED; } case Semantic::Kind::DispatchThreadID: case Semantic::Kind::GroupID: case Semantic::Kind::GroupIndex: case Semantic::Kind::GroupThreadID: case Semantic::Kind::DomainLocation: case Semantic::Kind::OutputControlPointID: case Semantic::Kind::GSInstanceID: case Semantic::Kind::Invalid: default: return D3D_NAME_UNDEFINED; } } static uint8_t NegMask(uint8_t V) { V ^= 0xF; return V & 0xF; } void DxilShaderReflection::CreateReflectionObjectsForSignature( const DxilSignature &Sig, std::vector<D3D12_SIGNATURE_PARAMETER_DESC> &Descs) { for (auto &&SigElem : Sig.GetElements()) { D3D12_SIGNATURE_PARAMETER_DESC Desc; Desc.ComponentType = CompTypeToRegisterComponentType(SigElem->GetCompType()); Desc.Mask = SigElem->GetColsAsMask(); Desc.MinPrecision = CompTypeToMinPrecision(SigElem->GetCompType()); if (m_bUsageInMetadata) { unsigned UsageMask = SigElem->GetUsageMask(); if (SigElem->IsAllocated()) UsageMask <<= SigElem->GetStartCol(); Desc.ReadWriteMask = Sig.IsInput() ? UsageMask : NegMask(UsageMask); } else { Desc.ReadWriteMask = Sig.IsInput() ? 0 : Desc.Mask; // Start with output-never-written/input-never-read. } Desc.Register = SigElem->GetStartRow(); Desc.Stream = SigElem->GetOutputStream(); Desc.SystemValueType = SemanticToSystemValueType( SigElem->GetSemantic(), m_pDxilModule->GetTessellatorDomain()); Desc.SemanticName = SigElem->GetName(); if (!SigElem->GetSemantic()->IsArbitrary()) Desc.SemanticName = CreateUpperCase(Desc.SemanticName); const std::vector<unsigned> &indexVec = SigElem->GetSemanticIndexVec(); for (unsigned semIdx = 0; semIdx < indexVec.size(); ++semIdx) { Desc.SemanticIndex = indexVec[semIdx]; if (Desc.SystemValueType == D3D_NAME_FINAL_LINE_DETAIL_TESSFACTOR && Desc.SemanticIndex == 1) Desc.SystemValueType = D3D_NAME_FINAL_LINE_DENSITY_TESSFACTOR; Descs.push_back(Desc); // When indexVec.size() > 1, subsequent indices need incremented register // index Desc.Register += 1; } } } LPCSTR DxilShaderReflection::CreateUpperCase(LPCSTR pValue) { // Restricted only to [a-z] ASCII.
LPCSTR pCursor = pValue; while (*pCursor != '\0') { if ('a' <= *pCursor && *pCursor <= 'z') { break; } ++pCursor; } if (*pCursor == '\0') return pValue; std::unique_ptr<char[]> pUpperStr = llvm::make_unique<char[]>(strlen(pValue) + 1); char *pWrite = pUpperStr.get(); pCursor = pValue; for (;;) { *pWrite = *pCursor; if ('a' <= *pWrite && *pWrite <= 'z') { *pWrite += ('A' - 'a'); } if (*pWrite == '\0') break; ++pWrite; ++pCursor; } m_UpperCaseNames.push_back(std::move(pUpperStr)); return m_UpperCaseNames.back().get(); } HRESULT DxilModuleReflection::LoadRDAT(const DxilPartHeader *pPart) { if (pPart) { IFRBOOL(m_RDAT.InitFromRDAT(GetDxilPartData(pPart), pPart->PartSize), DXC_E_CONTAINER_INVALID); } return S_OK; } HRESULT DxilModuleReflection::LoadProgramHeader( const DxilProgramHeader *pProgramHeader) { try { const char *pBitcode; uint32_t bitcodeLength; GetDxilProgramBitcode((const DxilProgramHeader *)pProgramHeader, &pBitcode, &bitcodeLength); std::unique_ptr<MemoryBuffer> pMemBuffer = MemoryBuffer::getMemBufferCopy(StringRef(pBitcode, bitcodeLength)); bool bBitcodeLoadError = false; auto errorHandler = [&bBitcodeLoadError](const DiagnosticInfo &diagInfo) { bBitcodeLoadError |= diagInfo.getSeverity() == DS_Error; }; #if 0 // We materialize eagerly, because we'll need to walk instructions to look // for usage information. ErrorOr<std::unique_ptr<Module>> mod = getLazyBitcodeModule(std::move(pMemBuffer), Context, errorHandler); #else ErrorOr<std::unique_ptr<Module>> mod = parseBitcodeFile(pMemBuffer->getMemBufferRef(), Context, errorHandler); #endif if (!mod || bBitcodeLoadError) { return E_INVALIDARG; } std::swap(m_pModule, mod.get()); m_pDxilModule = &m_pModule->GetOrCreateDxilModule(); unsigned ValMajor, ValMinor; m_pDxilModule->GetValidatorVersion(ValMajor, ValMinor); m_bUsageInMetadata = hlsl::DXIL::CompareVersions(ValMajor, ValMinor, 1, 5) >= 0; CreateReflectionObjects(); return S_OK; } CATCH_CPP_RETURN_HRESULT(); } HRESULT DxilShaderReflection::Load(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart) { IFR(LoadRDAT(pRDATPart)); IFR(LoadProgramHeader(pProgramHeader)); try { // Set cbuf usage. if (!m_bUsageInMetadata) SetCBufferUsage(); // Populate input/output/patch constant signatures. CreateReflectionObjectsForSignature(m_pDxilModule->GetInputSignature(), m_InputSignature); CreateReflectionObjectsForSignature(m_pDxilModule->GetOutputSignature(), m_OutputSignature); CreateReflectionObjectsForSignature( m_pDxilModule->GetPatchConstOrPrimSignature(), m_PatchConstantSignature); if (!m_bUsageInMetadata) MarkUsedSignatureElements(); InitDesc(); return S_OK; } CATCH_CPP_RETURN_HRESULT(); } HRESULT DxilShaderReflection::GetDesc(D3D12_SHADER_DESC *pDesc) noexcept { if (nullptr == pDesc) return E_POINTER; memcpy(pDesc, &m_Desc, sizeof(D3D12_SHADER_DESC)); return S_OK; } static bool GetUnsignedVal(Value *V, uint32_t *pValue) { ConstantInt *CI = dyn_cast<ConstantInt>(V); if (!CI) return false; uint64_t u = CI->getZExtValue(); if (u > UINT32_MAX) return false; *pValue = (uint32_t)u; return true; } void DxilShaderReflection::MarkUsedSignatureElements() { Function *F = m_pDxilModule->GetEntryFunction(); if (F == nullptr) { F = m_pDxilModule->GetPatchConstantFunction(); } DXASSERT(F != nullptr, "else module load should have failed"); // For every loadInput/storeOutput, update the corresponding ReadWriteMask. 
unsigned elementCount = m_InputSignature.size() + m_OutputSignature.size() + m_PatchConstantSignature.size(); unsigned markedElementCount = 0; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { DxilInst_LoadInput LI(&*I); DxilInst_StoreOutput SO(&*I); DxilInst_LoadPatchConstant LPC(&*I); DxilInst_StorePatchConstant SPC(&*I); DxilInst_StoreVertexOutput SVO(&*I); DxilInst_StorePrimitiveOutput SPO(&*I); std::vector<D3D12_SIGNATURE_PARAMETER_DESC> *pDescs = nullptr; const DxilSignature *pSig; uint32_t col, row, sigId; if (LI) { if (!GetUnsignedVal(LI.get_inputSigId(), &sigId)) continue; if (!GetUnsignedVal(LI.get_colIndex(), &col)) continue; GetUnsignedVal(LI.get_rowIndex(), &row); pDescs = &m_InputSignature; pSig = &m_pDxilModule->GetInputSignature(); } else if (SO) { if (!GetUnsignedVal(SO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SO.get_colIndex(), &col)) continue; GetUnsignedVal(SO.get_rowIndex(), &row); pDescs = &m_OutputSignature; pSig = &m_pDxilModule->GetOutputSignature(); } else if (SPC) { if (!GetUnsignedVal(SPC.get_outputSigID(), &sigId)) continue; if (!GetUnsignedVal(SPC.get_col(), &col)) continue; GetUnsignedVal(SPC.get_row(), &row); pDescs = &m_PatchConstantSignature; pSig = &m_pDxilModule->GetPatchConstOrPrimSignature(); } else if (LPC) { if (!GetUnsignedVal(LPC.get_inputSigId(), &sigId)) continue; if (!GetUnsignedVal(LPC.get_col(), &col)) continue; GetUnsignedVal(LPC.get_row(), &row); pDescs = &m_PatchConstantSignature; pSig = &m_pDxilModule->GetPatchConstOrPrimSignature(); } else if (SVO) { if (!GetUnsignedVal(SVO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SVO.get_colIndex(), &col)) continue; GetUnsignedVal(SVO.get_rowIndex(), &row); pDescs = &m_OutputSignature; pSig = &m_pDxilModule->GetOutputSignature(); } else if (SPO) { if (!GetUnsignedVal(SPO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SPO.get_colIndex(), &col)) continue; GetUnsignedVal(SPO.get_rowIndex(), &row); pDescs = &m_PatchConstantSignature; pSig = &m_pDxilModule->GetPatchConstOrPrimSignature(); } else { continue; } if (sigId >= pDescs->size()) continue; D3D12_SIGNATURE_PARAMETER_DESC *pDesc = &(*pDescs)[sigId]; // Consider being more fine-grained about masks. // We report sometimes-read on input as always-read. unsigned UsedMask = pSig->IsInput() ?
pDesc->Mask : NegMask(pDesc->Mask); if (pDesc->ReadWriteMask == UsedMask) continue; pDesc->ReadWriteMask = UsedMask; ++markedElementCount; if (markedElementCount == elementCount) return; } } void DxilShaderReflection::InitDesc() { D3D12_SHADER_DESC *pDesc = &m_Desc; const DxilModule &M = *m_pDxilModule; const ShaderModel *pSM = M.GetShaderModel(); pDesc->Version = EncodeVersion(pSM->GetKind(), pSM->GetMajor(), pSM->GetMinor()); Module *pModule = M.GetModule(); if (NamedMDNode *pIdentMD = pModule->getNamedMetadata("llvm.ident")) { if (pIdentMD->getNumOperands()) { if (MDNode *pMDList = pIdentMD->getOperand(0)) { if (pMDList->getNumOperands()) { if (MDString *pMDString = dyn_cast_or_null<MDString>(pMDList->getOperand(0))) { pDesc->Creator = pMDString->getString().data(); } } } } } // Unset: UINT Flags; // Shader // compilation/parse flags pDesc->ConstantBuffers = m_CBs.size(); pDesc->BoundResources = m_Resources.size(); pDesc->InputParameters = m_InputSignature.size(); pDesc->OutputParameters = m_OutputSignature.size(); pDesc->PatchConstantParameters = m_PatchConstantSignature.size(); pDesc->GSOutputTopology = (D3D_PRIMITIVE_TOPOLOGY)M.GetStreamPrimitiveTopology(); pDesc->GSMaxOutputVertexCount = M.GetMaxVertexCount(); if (pSM->IsHS()) pDesc->InputPrimitive = (D3D_PRIMITIVE)(D3D_PRIMITIVE_1_CONTROL_POINT_PATCH + M.GetInputControlPointCount() - 1); else pDesc->InputPrimitive = (D3D_PRIMITIVE)M.GetInputPrimitive(); pDesc->cGSInstanceCount = M.GetGSInstanceCount(); if (pSM->IsHS()) pDesc->cControlPoints = M.GetOutputControlPointCount(); else if (pSM->IsDS()) pDesc->cControlPoints = M.GetInputControlPointCount(); pDesc->HSOutputPrimitive = (D3D_TESSELLATOR_OUTPUT_PRIMITIVE)M.GetTessellatorOutputPrimitive(); pDesc->HSPartitioning = (D3D_TESSELLATOR_PARTITIONING)M.GetTessellatorPartitioning(); pDesc->TessellatorDomain = (D3D_TESSELLATOR_DOMAIN)M.GetTessellatorDomain(); // Instruction counts only roughly track some fxc counters DxilCounters counters = {}; m_pDxilModule->LoadDxilCounters(counters); // UINT InstructionCount; // Num llvm instructions in all functions // UINT TempArrayCount; // Number of bytes used in arrays (alloca + static // global) // UINT DynamicFlowControlCount; // Number of branches with more than one // successor for now // UINT ArrayInstructionCount; // number of load/store on arrays for now pDesc->InstructionCount = counters.insts; pDesc->TempArrayCount = counters.AllArrayBytes(); pDesc->DynamicFlowControlCount = counters.branches; pDesc->ArrayInstructionCount = counters.AllArrayAccesses(); // UINT FloatInstructionCount; // Number of floating point arithmetic // instructions used // UINT IntInstructionCount; // Number of signed integer arithmetic // instructions used // UINT UintInstructionCount; // Number of unsigned integer arithmetic // instructions used pDesc->FloatInstructionCount = counters.floats; pDesc->IntInstructionCount = counters.ints; pDesc->UintInstructionCount = counters.uints; // UINT TextureNormalInstructions; // Number of non-categorized texture // instructions // UINT TextureLoadInstructions; // Number of texture load // instructions // UINT TextureCompInstructions; // Number of texture // comparison instructions // UINT TextureBiasInstructions; // Number of // texture bias instructions // UINT TextureGradientInstructions; // Number of // texture gradient instructions pDesc->TextureNormalInstructions = counters.tex_norm; pDesc->TextureLoadInstructions = counters.tex_load; pDesc->TextureCompInstructions = counters.tex_cmp; pDesc->TextureBiasInstructions = 
counters.tex_bias; pDesc->TextureGradientInstructions = counters.tex_grad; // UINT CutInstructionCount; // Number of cut instructions used // UINT EmitInstructionCount; // Number of emit instructions used pDesc->CutInstructionCount = counters.gs_cut; pDesc->EmitInstructionCount = counters.gs_emit; // UINT cBarrierInstructions; // Number of barrier instructions in a // compute shader // UINT cInterlockedInstructions; // Number of // interlocked instructions // UINT cTextureStoreInstructions; // Number of // texture writes pDesc->cBarrierInstructions = counters.barrier; pDesc->cInterlockedInstructions = counters.atomic; pDesc->cTextureStoreInstructions = counters.tex_store; // Unset: UINT TempRegisterCount; // Don't know how to map this for SSA (not // going to do reg allocation here) // Unset: UINT DefCount; // Not sure what to map this to // Unset: UINT DclCount; // Number of declarations (input + output) // TODO: map to used input + output signature rows? // Unset: UINT StaticFlowControlCount; // Number of static flow control // instructions used This used to map to flow control using special // int/bool constant registers in DX9. // Unset: UINT MacroInstructionCount; // Number of macro instructions used // Macro instructions are a <= DX9 concept. } ID3D12ShaderReflectionConstantBuffer * DxilShaderReflection::GetConstantBufferByIndex(UINT Index) noexcept { return DxilModuleReflection::_GetConstantBufferByIndex(Index); } ID3D12ShaderReflectionConstantBuffer * DxilModuleReflection::_GetConstantBufferByIndex(UINT Index) { if (Index >= m_CBs.size()) { return &g_InvalidSRConstantBuffer; } return m_CBs[Index].get(); } ID3D12ShaderReflectionConstantBuffer * DxilShaderReflection::GetConstantBufferByName(LPCSTR Name) noexcept { return DxilModuleReflection::_GetConstantBufferByName(Name); } ID3D12ShaderReflectionConstantBuffer * DxilModuleReflection::_GetConstantBufferByName(LPCSTR Name) { if (!Name) { return &g_InvalidSRConstantBuffer; } size_t index = m_CBs.size(); auto it = m_CBsByName.find(Name); if (it != m_CBsByName.end()) { index = it->second; } else { it = m_StructuredBufferCBsByName.find(Name); if (it != m_StructuredBufferCBsByName.end()) { index = it->second; } } if (index < m_CBs.size()) { return m_CBs[index].get(); } return &g_InvalidSRConstantBuffer; } HRESULT DxilShaderReflection::GetResourceBindingDesc( UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc) noexcept { return DxilModuleReflection::_GetResourceBindingDesc(ResourceIndex, pDesc, m_PublicAPI); } HRESULT DxilModuleReflection::_GetResourceBindingDesc( UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc, PublicAPI api) { IFRBOOL(pDesc != nullptr, E_INVALIDARG); IFRBOOL(ResourceIndex < m_Resources.size(), E_INVALIDARG); if (api != PublicAPI::D3D12) { memcpy(pDesc, &m_Resources[ResourceIndex], sizeof(D3D11_SHADER_INPUT_BIND_DESC)); } else { *pDesc = m_Resources[ResourceIndex]; } return S_OK; } HRESULT DxilShaderReflection::GetInputParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept { IFRBOOL(pDesc != nullptr, E_INVALIDARG); IFRBOOL(ParameterIndex < m_InputSignature.size(), E_INVALIDARG); if (m_PublicAPI != PublicAPI::D3D11_43) *pDesc = m_InputSignature[ParameterIndex]; else memcpy(pDesc, &m_InputSignature[ParameterIndex], // D3D11_43 does not have MinPrecison. 
offsetof(D3D12_SIGNATURE_PARAMETER_DESC, Stream) + sizeof(D3D12_SIGNATURE_PARAMETER_DESC::Stream)); return S_OK; } HRESULT DxilShaderReflection::GetOutputParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept { IFRBOOL(pDesc != nullptr, E_INVALIDARG); IFRBOOL(ParameterIndex < m_OutputSignature.size(), E_INVALIDARG); if (m_PublicAPI != PublicAPI::D3D11_43) *pDesc = m_OutputSignature[ParameterIndex]; else memcpy(pDesc, &m_OutputSignature[ParameterIndex], // D3D11_43 does not have MinPrecison. offsetof(D3D12_SIGNATURE_PARAMETER_DESC, Stream) + sizeof(D3D12_SIGNATURE_PARAMETER_DESC::Stream)); return S_OK; } HRESULT DxilShaderReflection::GetPatchConstantParameterDesc( UINT ParameterIndex, D3D12_SIGNATURE_PARAMETER_DESC *pDesc) noexcept { IFRBOOL(pDesc != nullptr, E_INVALIDARG); IFRBOOL(ParameterIndex < m_PatchConstantSignature.size(), E_INVALIDARG); if (m_PublicAPI != PublicAPI::D3D11_43) *pDesc = m_PatchConstantSignature[ParameterIndex]; else memcpy(pDesc, &m_PatchConstantSignature[ParameterIndex], // D3D11_43 does not have MinPrecison. offsetof(D3D12_SIGNATURE_PARAMETER_DESC, Stream) + sizeof(D3D12_SIGNATURE_PARAMETER_DESC::Stream)); return S_OK; } ID3D12ShaderReflectionVariable * DxilShaderReflection::GetVariableByName(LPCSTR Name) noexcept { return DxilModuleReflection::_GetVariableByName(Name); } ID3D12ShaderReflectionVariable * DxilModuleReflection::_GetVariableByName(LPCSTR Name) { if (Name != nullptr) { // Iterate through all cbuffers to find the variable. for (UINT i = 0; i < m_CBs.size(); i++) { ID3D12ShaderReflectionVariable *pVar = m_CBs[i]->GetVariableByName(Name); if (pVar != &g_InvalidSRVariable) { return pVar; } } } return &g_InvalidSRVariable; } HRESULT DxilShaderReflection::GetResourceBindingDescByName( LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc) noexcept { return DxilModuleReflection::_GetResourceBindingDescByName(Name, pDesc, m_PublicAPI); } HRESULT DxilModuleReflection::_GetResourceBindingDescByName( LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc, PublicAPI api) { IFRBOOL(Name != nullptr, E_INVALIDARG); for (UINT i = 0; i < m_Resources.size(); i++) { if (strcmp(m_Resources[i].Name, Name) == 0) { if (api != PublicAPI::D3D12) { memcpy(pDesc, &m_Resources[i], sizeof(D3D11_SHADER_INPUT_BIND_DESC)); } else { *pDesc = m_Resources[i]; } return S_OK; } } return HRESULT_FROM_WIN32(ERROR_NOT_FOUND); } UINT DxilShaderReflection::GetMovInstructionCount() noexcept { return 0; } UINT DxilShaderReflection::GetMovcInstructionCount() noexcept { return 0; } UINT DxilShaderReflection::GetConversionInstructionCount() noexcept { return 0; } UINT DxilShaderReflection::GetBitwiseInstructionCount() noexcept { return 0; } D3D_PRIMITIVE DxilShaderReflection::GetGSInputPrimitive() noexcept { if (!m_pDxilModule->GetShaderModel()->IsGS()) return D3D_PRIMITIVE::D3D10_PRIMITIVE_UNDEFINED; return (D3D_PRIMITIVE)m_pDxilModule->GetInputPrimitive(); } BOOL DxilShaderReflection::IsSampleFrequencyShader() noexcept { // TODO: determine correct value return FALSE; } UINT DxilShaderReflection::GetNumInterfaceSlots() noexcept { return 0; } HRESULT DxilShaderReflection::GetMinFeatureLevel(D3D_FEATURE_LEVEL *pLevel) noexcept { IFR(AssignToOut(D3D_FEATURE_LEVEL_12_0, pLevel)); return S_OK; } UINT DxilShaderReflection::GetThreadGroupSize(UINT *pSizeX, UINT *pSizeY, UINT *pSizeZ) noexcept { if (!m_pDxilModule->GetShaderModel()->IsCS() && !m_pDxilModule->GetShaderModel()->IsMS() && !m_pDxilModule->GetShaderModel()->IsAS()) { AssignToOutOpt((UINT)0, pSizeX); AssignToOutOpt((UINT)0, 
pSizeY); AssignToOutOpt((UINT)0, pSizeZ); return 0; } unsigned x = m_pDxilModule->GetNumThreads(0); unsigned y = m_pDxilModule->GetNumThreads(1); unsigned z = m_pDxilModule->GetNumThreads(2); AssignToOutOpt(x, pSizeX); AssignToOutOpt(y, pSizeY); AssignToOutOpt(z, pSizeZ); return x * y * z; } UINT64 DxilShaderReflection::GetRequiresFlags() noexcept { UINT64 result = m_pDxilModule->m_ShaderFlags.GetFeatureInfo(); // FeatureInfo flags are identical, with the exception of a collision between: // SHADER_FEATURE_COMPUTE_SHADERS_PLUS_RAW_AND_STRUCTURED_BUFFERS_VIA_SHADER_4_X // and D3D_SHADER_REQUIRES_EARLY_DEPTH_STENCIL // We keep track of the flag elsewhere, so use that instead. result &= ~(UINT64)D3D_SHADER_REQUIRES_EARLY_DEPTH_STENCIL; if (m_pDxilModule->m_ShaderFlags.GetForceEarlyDepthStencil()) result |= D3D_SHADER_REQUIRES_EARLY_DEPTH_STENCIL; return result; } // ID3D12FunctionReflection class CFunctionReflection final : public ID3D12FunctionReflection { protected: DxilLibraryReflection *m_pLibraryReflection = nullptr; const Function *m_pFunction; const DxilFunctionProps *m_pProps; // nullptr if non-shader library function // or patch constant function std::string m_Name; typedef SmallSetVector<UINT32, 8> ResourceUseSet; ResourceUseSet m_UsedResources; ResourceUseSet m_UsedCBs; UINT64 m_FeatureFlags; public: void Initialize(DxilLibraryReflection *pLibraryReflection, Function *pFunction) { DXASSERT_NOMSG(pLibraryReflection); DXASSERT_NOMSG(pFunction); m_pLibraryReflection = pLibraryReflection; m_pFunction = pFunction; const DxilModule &M = *m_pLibraryReflection->m_pDxilModule; m_Name = m_pFunction->getName().str(); m_pProps = nullptr; if (M.HasDxilFunctionProps(m_pFunction)) { m_pProps = &M.GetDxilFunctionProps(m_pFunction); } } void AddResourceReference(UINT resIndex) { m_UsedResources.insert(resIndex); } void AddCBReference(UINT cbIndex) { m_UsedCBs.insert(cbIndex); } void SetFeatureFlags(UINT64 flags) { m_FeatureFlags = flags; } // ID3D12FunctionReflection STDMETHOD(GetDesc)(D3D12_FUNCTION_DESC *pDesc); // BufferIndex relative to used constant buffers here STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetConstantBufferByIndex) (UINT BufferIndex); STDMETHOD_(ID3D12ShaderReflectionConstantBuffer *, GetConstantBufferByName) (LPCSTR Name); STDMETHOD(GetResourceBindingDesc) (UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc); STDMETHOD_(ID3D12ShaderReflectionVariable *, GetVariableByName)(LPCSTR Name); STDMETHOD(GetResourceBindingDescByName) (LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc); // Use D3D_RETURN_PARAMETER_INDEX to get description of the return value. 
STDMETHOD_(ID3D12FunctionParameterReflection *, GetFunctionParameter) (INT ParameterIndex) { return &g_InvalidFunctionParameter; } }; HRESULT CFunctionReflection::GetDesc(D3D12_FUNCTION_DESC *pDesc) { DXASSERT_NOMSG(m_pLibraryReflection); IFR(ZeroMemoryToOut(pDesc)); const ShaderModel *pSM = m_pLibraryReflection->m_pDxilModule->GetShaderModel(); DXIL::ShaderKind kind = DXIL::ShaderKind::Library; if (m_pProps) { kind = m_pProps->shaderKind; } pDesc->Version = EncodeVersion(kind, pSM->GetMajor(), pSM->GetMinor()); // Unset: LPCSTR Creator; // Creator string // Unset: UINT Flags; // Shader compilation/parse flags pDesc->ConstantBuffers = (UINT)m_UsedCBs.size(); pDesc->BoundResources = (UINT)m_UsedResources.size(); // Unset: UINT InstructionCount; // Number of emitted instructions // Unset: UINT TempRegisterCount; // Number of temporary registers used // Unset: UINT TempArrayCount; // Number of temporary arrays used // Unset: UINT DefCount; // Number of constant defines // Unset: UINT DclCount; // Number of declarations (input + output) // Unset: UINT // TextureNormalInstructions; // Number of non-categorized texture // instructions // Unset: UINT TextureLoadInstructions; // Number of texture load // instructions // Unset: UINT TextureCompInstructions; // Number of texture comparison // instructions // Unset: UINT TextureBiasInstructions;// Number of texture bias instructions // Unset: UINT TextureGradientInstructions; // Number of texture gradient // instructions // Unset: UINT FloatInstructionCount; // Number of floating point arithmetic // instructions used // Unset: UINT IntInstructionCount; // Number of signed integer arithmetic // instructions used // Unset: UINT UintInstructionCount; // Number of unsigned integer // arithmetic instructions used // Unset: UINT StaticFlowControlCount; // Number of static flow control // instructions used // Unset: UINT DynamicFlowControlCount; // Number of dynamic flow control // instructions used // Unset: UINT MacroInstructionCount; // Number of macro instructions used // Unset: UINT ArrayInstructionCount; // Number of array instructions used // Unset: UINT MovInstructionCount; // Number of mov instructions used // Unset: UINT MovcInstructionCount; // Number of movc instructions used // Unset: UINT ConversionInstructionCount; // Number of type conversion // instructions used // Unset: UINT BitwiseInstructionCount; // Number of bitwise arithmetic // instructions used // Unset: D3D_FEATURE_LEVEL MinFeatureLevel; // Min target of the function // byte code pDesc->RequiredFeatureFlags = m_FeatureFlags & ~(UINT64)D3D_SHADER_REQUIRES_EARLY_DEPTH_STENCIL; // Also Mask off function-level derivatives flag. 
pDesc->RequiredFeatureFlags &= ~DXIL::OptFeatureInfo_UsesDerivatives; if (kind == DXIL::ShaderKind::Pixel && m_pProps && m_pProps->ShaderProps.PS.EarlyDepthStencil) { pDesc->RequiredFeatureFlags |= D3D_SHADER_REQUIRES_EARLY_DEPTH_STENCIL; } pDesc->Name = m_Name.c_str(); // Unset: INT FunctionParameterCount; // Number of logical parameters in the // function signature (not including return) // Unset: BOOL HasReturn; // TRUE, if function returns a value, false - it is // a subroutine // Unset: BOOL Has10Level9VertexShader; // TRUE, if there is a 10L9 VS blob // Unset: BOOL Has10Level9PixelShader; // TRUE, if there is a 10L9 PS blob return S_OK; } // BufferIndex is relative to used constant buffers here ID3D12ShaderReflectionConstantBuffer * CFunctionReflection::GetConstantBufferByIndex(UINT BufferIndex) { DXASSERT_NOMSG(m_pLibraryReflection); if (BufferIndex >= m_UsedCBs.size()) return &g_InvalidSRConstantBuffer; return m_pLibraryReflection->_GetConstantBufferByIndex( m_UsedCBs[BufferIndex]); } ID3D12ShaderReflectionConstantBuffer * CFunctionReflection::GetConstantBufferByName(LPCSTR Name) { DXASSERT_NOMSG(m_pLibraryReflection); return m_pLibraryReflection->_GetConstantBufferByName(Name); } HRESULT CFunctionReflection::GetResourceBindingDesc( UINT ResourceIndex, D3D12_SHADER_INPUT_BIND_DESC *pDesc) { DXASSERT_NOMSG(m_pLibraryReflection); if (ResourceIndex >= m_UsedResources.size()) return E_INVALIDARG; return m_pLibraryReflection->_GetResourceBindingDesc( m_UsedResources[ResourceIndex], pDesc); } ID3D12ShaderReflectionVariable * CFunctionReflection::GetVariableByName(LPCSTR Name) { DXASSERT_NOMSG(m_pLibraryReflection); return m_pLibraryReflection->_GetVariableByName(Name); } HRESULT CFunctionReflection::GetResourceBindingDescByName( LPCSTR Name, D3D12_SHADER_INPUT_BIND_DESC *pDesc) { DXASSERT_NOMSG(m_pLibraryReflection); return m_pLibraryReflection->_GetResourceBindingDescByName(Name, pDesc); } // DxilLibraryReflection void DxilLibraryReflection::AddResourceDependencies() { auto functionTable = m_RDAT.GetFunctionTable(); m_FunctionVector.clear(); m_FunctionVector.reserve(functionTable.Count()); std::map<StringRef, CFunctionReflection *> orderedMap; auto resourceTable = m_RDAT.GetResourceTable(); unsigned SamplersStart = 0; unsigned SRVsStart = 0; unsigned UAVsStart = 0; DXIL::ResourceClass prevClass = DXIL::ResourceClass::CBuffer; for (unsigned i = 0; i < resourceTable.Count(); i++) { auto resource = resourceTable[i]; if (prevClass != resource.getClass()) { prevClass = resource.getClass(); switch (prevClass) { case DXIL::ResourceClass::Sampler: SamplersStart = i; LLVM_FALLTHROUGH; case DXIL::ResourceClass::SRV: SRVsStart = i; LLVM_FALLTHROUGH; case DXIL::ResourceClass::UAV: UAVsStart = i; break; } } } IFTBOOL(resourceTable.Count() == m_Resources.size(), DXC_E_INCORRECT_DXIL_METADATA); for (unsigned iFunc = 0; iFunc < functionTable.Count(); ++iFunc) { auto FR = functionTable[iFunc]; auto &func = m_FunctionMap[FR.getName()]; DXASSERT(!func.get(), "otherwise duplicate named functions"); Function *F = m_pModule->getFunction(FR.getName()); func.reset(new CFunctionReflection()); func->Initialize(this, F); m_FunctionsByPtr[F] = func.get(); orderedMap[FR.getName()] = func.get(); func->SetFeatureFlags(FR.GetFeatureFlags()); for (unsigned iRes = 0; iRes < FR.getResources().Count(); ++iRes) { auto RR = FR.getResources()[iRes]; unsigned id = RR.getID(); switch (RR.getClass()) { case DXIL::ResourceClass::CBuffer: func->AddResourceReference(id); func->AddCBReference(id); break; case 
DXIL::ResourceClass::Sampler: func->AddResourceReference(SamplersStart + id); break; case DXIL::ResourceClass::SRV: func->AddResourceReference(SRVsStart + id); if (DXIL::IsStructuredBuffer(RR.getKind())) { auto it = m_StructuredBufferCBsByName.find(RR.getName()); if (it != m_StructuredBufferCBsByName.end()) func->AddCBReference(it->second); } else if (RR.getKind() == DXIL::ResourceKind::TBuffer) { auto it = m_CBsByName.find(RR.getName()); if (it != m_CBsByName.end()) func->AddCBReference(it->second); } break; case DXIL::ResourceClass::UAV: func->AddResourceReference(UAVsStart + id); if (DXIL::IsStructuredBuffer(RR.getKind())) { auto it = m_StructuredBufferCBsByName.find(RR.getName()); if (it != m_StructuredBufferCBsByName.end()) func->AddCBReference(it->second); } break; default: DXASSERT(false, "Unrecognized ResourceClass in RDAT"); } } } for (auto &it : orderedMap) { m_FunctionVector.push_back(it.second); } } static void CollectCBufUsageForLib(Value *V, std::vector<unsigned> &cbufUsage, bool bMinPrecision) { for (auto user : V->users()) { Value *V = user; if (auto *CI = dyn_cast<CallInst>(V)) { if (hlsl::OP::IsDxilOpFuncCallInst( CI, hlsl::OP::OpCode::CreateHandleForLib)) { CollectCBufUsage(CI, cbufUsage, bMinPrecision); } } else if (isa<GEPOperator>(V) || isa<LoadInst>(V)) { CollectCBufUsageForLib(user, cbufUsage, bMinPrecision); } } } void DxilLibraryReflection::SetCBufferUsage() { unsigned cbSize = std::min(m_CBs.size(), m_pDxilModule->GetCBuffers().size()); for (unsigned i = 0; i < cbSize; i++) { std::vector<unsigned> cbufUsage; CollectCBufUsageForLib(m_pDxilModule->GetCBuffer(i).GetGlobalSymbol(), cbufUsage, m_pDxilModule->GetUseMinPrecision()); SetCBufVarUsage(*m_CBs[i], cbufUsage); } } // ID3D12LibraryReflection HRESULT DxilLibraryReflection::Load(const DxilProgramHeader *pProgramHeader, const DxilPartHeader *pRDATPart) { IFR(LoadRDAT(pRDATPart)); IFR(LoadProgramHeader(pProgramHeader)); try { AddResourceDependencies(); if (!m_bUsageInMetadata) SetCBufferUsage(); return S_OK; } CATCH_CPP_RETURN_HRESULT(); } HRESULT DxilLibraryReflection::GetDesc(D3D12_LIBRARY_DESC *pDesc) { IFR(ZeroMemoryToOut(pDesc)); // Unset: LPCSTR Creator; // The name of the originator of the // library. Unset: UINT Flags; // Compilation flags. UINT // FunctionCount; // Number of functions exported from the library. pDesc->FunctionCount = (UINT)m_FunctionVector.size(); return S_OK; } ID3D12FunctionReflection * DxilLibraryReflection::GetFunctionByIndex(INT FunctionIndex) { if ((UINT)FunctionIndex >= m_FunctionVector.size()) return &g_InvalidFunction; return m_FunctionVector[FunctionIndex]; }
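// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): minimal client-side
// use of the reflection interfaces implemented above, assuming `pReflection`
// is a valid ID3D12ShaderReflection* obtained elsewhere (e.g. through
// IDxcContainerReflection). Error handling is elided.
// ---------------------------------------------------------------------------
static void WalkReflectionSketch(ID3D12ShaderReflection *pReflection) {
  D3D12_SHADER_DESC shaderDesc;
  pReflection->GetDesc(&shaderDesc);
  // Bound resources are the D3D12_SHADER_INPUT_BIND_DESC records built by
  // CreateReflectionObjectForResource above.
  for (UINT i = 0; i < shaderDesc.BoundResources; ++i) {
    D3D12_SHADER_INPUT_BIND_DESC bindDesc;
    pReflection->GetResourceBindingDesc(i, &bindDesc);
  }
  // Constant buffers and their variables are populated by
  // CShaderReflectionConstantBuffer::Initialize above; D3D_SVF_USED in
  // VarDesc.uFlags reflects the usage analysis in SetCBufferUsage.
  for (UINT i = 0; i < shaderDesc.ConstantBuffers; ++i) {
    ID3D12ShaderReflectionConstantBuffer *pCB =
        pReflection->GetConstantBufferByIndex(i);
    D3D12_SHADER_BUFFER_DESC cbDesc;
    pCB->GetDesc(&cbDesc);
    for (UINT v = 0; v < cbDesc.Variables; ++v) {
      D3D12_SHADER_VARIABLE_DESC varDesc;
      pCB->GetVariableByIndex(v)->GetDesc(&varDesc);
    }
  }
}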
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/LLVMBuild.txt
; Copyright (C) Microsoft Corporation. All rights reserved. ; This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = HLSL parent = Libraries required_libraries = BitReader Core DxcSupport DxilContainer IPA Support DXIL DxcBindingTable
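;===------------------------------------------------------------------------===;
; Illustrative only (assumed, not part of this file): a downstream component
; linking against this library would name HLSL in the required_libraries of
; its own LLVMBuild.txt, for example:
;
; [component_0]
; type = Library
; name = MyHLSLTool        ; hypothetical component name
; parent = Libraries
; required_libraries = HLSL Support
;===------------------------------------------------------------------------===;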
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMatrixSubscriptUseReplacer.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLMatrixSubscriptUseReplacer.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "HLMatrixSubscriptUseReplacer.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/Support/Global.h" #include "llvm/IR/Constant.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" using namespace llvm; using namespace hlsl; HLMatrixSubscriptUseReplacer::HLMatrixSubscriptUseReplacer( CallInst *Call, Value *LoweredPtr, Value *TempLoweredMatrix, SmallVectorImpl<Value *> &ElemIndices, bool AllowLoweredPtrGEPs, std::vector<Instruction *> &DeadInsts) : LoweredPtr(LoweredPtr), ElemIndices(ElemIndices), DeadInsts(DeadInsts), AllowLoweredPtrGEPs(AllowLoweredPtrGEPs), TempLoweredMatrix(TempLoweredMatrix) { HasScalarResult = !Call->getType()->getPointerElementType()->isVectorTy(); for (Value *ElemIdx : ElemIndices) { if (!isa<Constant>(ElemIdx)) { HasDynamicElemIndex = true; break; } } if (TempLoweredMatrix) LoweredTy = TempLoweredMatrix->getType(); else LoweredTy = LoweredPtr->getType()->getPointerElementType(); replaceUses(Call, /* GEPIdx */ nullptr); } void HLMatrixSubscriptUseReplacer::replaceUses(Instruction *PtrInst, Value *SubIdxVal) { // We handle any number of load/stores of the subscript, // whether through a GEP or not, but there should really only be one. while (!PtrInst->use_empty()) { llvm::Use &Use = *PtrInst->use_begin(); Instruction *UserInst = cast<Instruction>(Use.getUser()); bool DeleteUserInst = true; if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UserInst)) { // Recurse on GEPs DXASSERT(GEP->getNumIndices() >= 1 && GEP->getNumIndices() <= 2, "Unexpected GEP on constant matrix subscript."); DXASSERT( cast<ConstantInt>(GEP->idx_begin()->get())->isZero(), "Unexpected nonzero first index of constant matrix subscript GEP."); Value *NewSubIdxVal = SubIdxVal; if (GEP->getNumIndices() == 2) { DXASSERT(!HasScalarResult && SubIdxVal == nullptr, "Unexpected GEP on matrix subscript scalar value."); NewSubIdxVal = (GEP->idx_begin() + 1)->get(); } replaceUses(GEP, NewSubIdxVal); } else { IRBuilder<> UserBuilder(UserInst); if (Value *ScalarElemIdx = tryGetScalarIndex(SubIdxVal, UserBuilder)) { // We are accessing a scalar element if (AllowLoweredPtrGEPs) { // Simply make the instruction point to the element in the lowered // pointer DeleteUserInst = false; Value *ElemPtr = UserBuilder.CreateGEP( LoweredPtr, {UserBuilder.getInt32(0), ScalarElemIdx}); Use.set(ElemPtr); } else { bool IsDynamicIndex = !isa<Constant>(ScalarElemIdx); cacheLoweredMatrix(IsDynamicIndex, UserBuilder); if (LoadInst *Load = dyn_cast<LoadInst>(UserInst)) { Value *Elem = loadElem(ScalarElemIdx, UserBuilder); Load->replaceAllUsesWith(Elem); } else if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { storeElem(ScalarElemIdx, Store->getValueOperand(), UserBuilder); flushLoweredMatrix(UserBuilder); } else { llvm_unreachable("Unexpected matrix subscript use."); } } } else { // We are accessing a vector given by ElemIndices cacheLoweredMatrix(HasDynamicElemIndex, UserBuilder); if (LoadInst *Load = dyn_cast<LoadInst>(UserInst)) { Value *Vector = loadVector(UserBuilder); Load->replaceAllUsesWith(Vector); } else if (StoreInst *Store = 
dyn_cast<StoreInst>(UserInst)) { storeVector(Store->getValueOperand(), UserBuilder); flushLoweredMatrix(UserBuilder); } else { llvm_unreachable("Unexpected matrix subscript use."); } } } // We replaced this use, mark it dead if (DeleteUserInst) { DXASSERT(UserInst->use_empty(), "Matrix subscript user should be dead at this point."); Use.set(UndefValue::get(Use->getType())); DeadInsts.emplace_back(UserInst); } } } Value *HLMatrixSubscriptUseReplacer::tryGetScalarIndex(Value *SubIdxVal, IRBuilder<> &Builder) { if (SubIdxVal == nullptr) { // mat[0] case, returns a vector if (!HasScalarResult) return nullptr; // mat._11 case DXASSERT_NOMSG(ElemIndices.size() == 1); return ElemIndices[0]; } if (ConstantInt *SubIdxConst = dyn_cast<ConstantInt>(SubIdxVal)) { // mat[0][0], mat[i][0] or mat._11_12[0] cases. uint64_t SubIdx = SubIdxConst->getLimitedValue(); DXASSERT(SubIdx < ElemIndices.size(), "Unexpected out of range constant matrix subindex."); return ElemIndices[SubIdx]; } // mat[0][j] or mat[i][j] case. // We need to dynamically index into the level 1 element indices if (LazyTempElemIndicesArrayAlloca == nullptr) { // The level 2 index is dynamic, use it to index a temporary array of the // level 1 indices. IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); ArrayType *ArrayTy = ArrayType::get(AllocaBuilder.getInt32Ty(), ElemIndices.size()); LazyTempElemIndicesArrayAlloca = AllocaBuilder.CreateAlloca(ArrayTy); } // Store level 1 indices in the temporary array Value *GEPIndices[2] = {Builder.getInt32(0), nullptr}; for (unsigned SubIdx = 0; SubIdx < ElemIndices.size(); ++SubIdx) { GEPIndices[1] = Builder.getInt32(SubIdx); Value *TempArrayElemPtr = Builder.CreateGEP(LazyTempElemIndicesArrayAlloca, GEPIndices); Builder.CreateStore(ElemIndices[SubIdx], TempArrayElemPtr); } // Dynamically index using the subindex GEPIndices[1] = SubIdxVal; Value *ElemIdxPtr = Builder.CreateGEP(LazyTempElemIndicesArrayAlloca, GEPIndices); return Builder.CreateLoad(ElemIdxPtr); } // Unless we are allowed to GEP directly into the lowered matrix, // we must load the vector in memory in order to read or write any elements. // If we're going to dynamically index, we need to copy the vector into a // temporary array. Further loadElem/storeElem calls depend on how we cached the // matrix here. void HLMatrixSubscriptUseReplacer::cacheLoweredMatrix(bool ForDynamicIndexing, IRBuilder<> &Builder) { // If we can GEP right into the lowered pointer, no need for caching if (AllowLoweredPtrGEPs) return; // Load without memory to register representation conversion, // since the point is to mimic pointer semantics if (!TempLoweredMatrix) TempLoweredMatrix = Builder.CreateLoad(LoweredPtr); if (!ForDynamicIndexing) return; // To handle mat[i] cases, we need to copy the matrix elements to // an array which we can dynamically index. 
VectorType *MatVecTy = cast<VectorType>(TempLoweredMatrix->getType()); // Lazily create the temporary array alloca if (LazyTempElemArrayAlloca == nullptr) { ArrayType *TempElemArrayTy = ArrayType::get(MatVecTy->getElementType(), MatVecTy->getNumElements()); IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); LazyTempElemArrayAlloca = AllocaBuilder.CreateAlloca(TempElemArrayTy); } // Copy the matrix elements to the temporary array Value *GEPIndices[2] = {Builder.getInt32(0), nullptr}; for (unsigned ElemIdx = 0; ElemIdx < MatVecTy->getNumElements(); ++ElemIdx) { Value *VecElem = Builder.CreateExtractElement( TempLoweredMatrix, static_cast<uint64_t>(ElemIdx)); GEPIndices[1] = Builder.getInt32(ElemIdx); Value *TempArrayElemPtr = Builder.CreateGEP(LazyTempElemArrayAlloca, GEPIndices); Builder.CreateStore(VecElem, TempArrayElemPtr); } // Null out the vector form so we know to use the array TempLoweredMatrix = nullptr; } Value *HLMatrixSubscriptUseReplacer::loadElem(Value *Idx, IRBuilder<> &Builder) { if (AllowLoweredPtrGEPs) { Value *ElemPtr = Builder.CreateGEP(LoweredPtr, {Builder.getInt32(0), Idx}); return Builder.CreateLoad(ElemPtr); } else if (TempLoweredMatrix == nullptr) { DXASSERT_NOMSG(LazyTempElemArrayAlloca != nullptr); Value *TempArrayElemPtr = Builder.CreateGEP(LazyTempElemArrayAlloca, {Builder.getInt32(0), Idx}); return Builder.CreateLoad(TempArrayElemPtr); } else { DXASSERT_NOMSG(isa<ConstantInt>(Idx)); return Builder.CreateExtractElement(TempLoweredMatrix, Idx); } } void HLMatrixSubscriptUseReplacer::storeElem(Value *Idx, Value *Elem, IRBuilder<> &Builder) { if (AllowLoweredPtrGEPs) { Value *ElemPtr = Builder.CreateGEP(LoweredPtr, {Builder.getInt32(0), Idx}); Builder.CreateStore(Elem, ElemPtr); } else if (TempLoweredMatrix == nullptr) { DXASSERT_NOMSG(LazyTempElemArrayAlloca != nullptr); Value *GEPIndices[2] = {Builder.getInt32(0), Idx}; Value *TempArrayElemPtr = Builder.CreateGEP(LazyTempElemArrayAlloca, GEPIndices); Builder.CreateStore(Elem, TempArrayElemPtr); } else { DXASSERT_NOMSG(isa<ConstantInt>(Idx)); TempLoweredMatrix = Builder.CreateInsertElement(TempLoweredMatrix, Elem, Idx); } } Value *HLMatrixSubscriptUseReplacer::loadVector(IRBuilder<> &Builder) { if (TempLoweredMatrix != nullptr) { // We can optimize this as a shuffle SmallVector<Constant *, 4> ShuffleIndices; ShuffleIndices.reserve(ElemIndices.size()); for (Value *ElemIdx : ElemIndices) ShuffleIndices.emplace_back(cast<Constant>(ElemIdx)); Constant *ShuffleVector = ConstantVector::get(ShuffleIndices); return Builder.CreateShuffleVector(TempLoweredMatrix, TempLoweredMatrix, ShuffleVector); } // Otherwise load elements one by one // Lowered form may be array when AllowLoweredPtrGEPs == true. Type *ElemTy = LoweredTy->isVectorTy() ? LoweredTy->getScalarType() : cast<ArrayType>(LoweredTy)->getArrayElementType(); VectorType *VecTy = VectorType::get(ElemTy, static_cast<unsigned>(ElemIndices.size())); Value *Result = UndefValue::get(VecTy); for (unsigned SubIdx = 0; SubIdx < ElemIndices.size(); ++SubIdx) { Value *Elem = loadElem(ElemIndices[SubIdx], Builder); Result = Builder.CreateInsertElement(Result, Elem, static_cast<uint64_t>(SubIdx)); } return Result; } void HLMatrixSubscriptUseReplacer::storeVector(Value *Vec, IRBuilder<> &Builder) { // We can't shuffle vectors of different sizes together, so insert one by one. 
DXASSERT(cast<FixedVectorType>(Vec->getType())->getNumElements() == ElemIndices.size(), "Matrix subscript stored vector element count mismatch."); for (unsigned SubIdx = 0; SubIdx < ElemIndices.size(); ++SubIdx) { Value *Elem = Builder.CreateExtractElement(Vec, static_cast<uint64_t>(SubIdx)); storeElem(ElemIndices[SubIdx], Elem, Builder); } } void HLMatrixSubscriptUseReplacer::flushLoweredMatrix(IRBuilder<> &Builder) { // If GEPs are allowed, no flushing is necessary, we modified the source // elements directly. if (AllowLoweredPtrGEPs) return; if (TempLoweredMatrix == nullptr) { // First re-create the vector from the temporary array DXASSERT_NOMSG(LazyTempElemArrayAlloca != nullptr); VectorType *LoweredMatrixTy = cast<VectorType>(LoweredTy); TempLoweredMatrix = UndefValue::get(LoweredMatrixTy); Value *GEPIndices[2] = {Builder.getInt32(0), nullptr}; for (unsigned ElemIdx = 0; ElemIdx < LoweredMatrixTy->getNumElements(); ++ElemIdx) { GEPIndices[1] = Builder.getInt32(ElemIdx); Value *TempArrayElemPtr = Builder.CreateGEP(LazyTempElemArrayAlloca, GEPIndices); Value *NewElem = Builder.CreateLoad(TempArrayElemPtr); TempLoweredMatrix = Builder.CreateInsertElement( TempLoweredMatrix, NewElem, static_cast<uint64_t>(ElemIdx)); } } // Store back the lowered matrix to its pointer Builder.CreateStore(TempLoweredMatrix, LoweredPtr); TempLoweredMatrix = nullptr; }
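The dynamic-index spill above is easiest to see outside LLVM IR. The following stand-alone plain-C++ sketch (hypothetical names; an analogy, not the pass itself) mirrors tryGetScalarIndex's dynamic path: because the level-2 subindex is only known at runtime, the level-1 element indices are first copied into a local array that can be dynamically indexed.

#include <array>

// Analogy for a float4x3 lowered to a flat 12-element vector. elemIndices
// plays the role of ElemIndices for one row; the local copy plays the role
// of LazyTempElemIndicesArrayAlloca.
float dynSubscript(const std::array<float, 12> &loweredMat,
                   const std::array<int, 3> &elemIndices, // level-1 indices for mat[r]
                   int j) {                               // dynamic level-2 subindex
  std::array<int, 3> tmpElemIndices = elemIndices; // spill indices to memory
  return loweredMat[tmpElemIndices[j]];            // dynamic GEP + load
}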
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilDeleteRedundantDebugValues.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilDeleteRedundantDebugValues.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// // // Removes as many dbg.value's as possible: // // 1. Search for all scopes (and their parent scopes) that have any real // (non-debug) // instructions at all. // // 2. For each dbg.value, if it's referring to a variable from a scope not in // the set of scopes from step 1, then delete it. // // 3. In any contiguous series of dbg.value instructions, if there are // dbg.value's // that point to the same variable+fragment, then delete all but the last // one, since it would be the only authentic mapping for that variable // fragment. #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/Function.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include <unordered_set> using namespace llvm; namespace { class DxilDeleteRedundantDebugValues : public ModulePass { public: static char ID; explicit DxilDeleteRedundantDebugValues() : ModulePass(ID) { initializeDxilDeleteRedundantDebugValuesPass( *PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; }; char DxilDeleteRedundantDebugValues::ID; } // namespace bool DxilDeleteRedundantDebugValues::runOnModule(Module &M) { if (!llvm::hasDebugInfo(M)) return false; bool Changed = false; std::unordered_set<DILocalScope *> SeenScopes; typedef std::pair<DILocalVariable *, DIExpression *> VarPair; SmallDenseMap<VarPair, DbgValueInst *> SeenVar; for (Function &F : M) { SeenScopes.clear(); SeenVar.clear(); // Collect a set of all scopes that are attached to non-debug instructions // There's no need to keep debug info for any variables in any scopes that // don't have any real instructions anyway. for (BasicBlock &BB : F) { for (Instruction &I : BB) { if (isa<DbgInfoIntrinsic>(I)) continue; DebugLoc DL = I.getDebugLoc(); if (!DL) continue; DILocalScope *Scope = cast_or_null<DILocalScope>(DL.getScope()); if (!Scope) continue; while (Scope) { SeenScopes.insert(Scope); if (DILexicalBlockBase *LB = dyn_cast<DILexicalBlockBase>(Scope)) { Scope = LB->getScope(); } else { Scope = nullptr; } } } } for (BasicBlock &BB : F) { for (auto it = BB.begin(), end = BB.end(); it != end;) { Instruction &I = *(it++); if (!isa<DbgInfoIntrinsic>(I)) { SeenVar.clear(); continue; } DbgValueInst *DI = dyn_cast<DbgValueInst>(&I); if (!DI) continue; DILocalVariable *Var = DI->getVariable(); DIExpression *Expr = DI->getExpression(); VarPair Pair = VarPair(Var, Expr); if (!SeenScopes.count(Var->getScope())) { Changed = true; DI->eraseFromParent(); continue; } auto findIt = SeenVar.find(Pair); if (findIt != SeenVar.end()) { findIt->second->eraseFromParent(); findIt->second = DI; Changed = true; } else { SeenVar[Pair] = DI; } } } } return Changed; } ModulePass *llvm::createDxilDeleteRedundantDebugValuesPass() { return new DxilDeleteRedundantDebugValues(); } INITIALIZE_PASS(DxilDeleteRedundantDebugValues, "dxil-delete-redundant-debug-values", "Dxil Delete Redundant Debug Values", false, false)
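A minimal stand-alone sketch in plain C++ (hypothetical Rec type, not the pass's data structures) of rule 3 above: within a contiguous run of debug values, only the last mapping per (variable, fragment) key survives, and any real instruction starts a new run.

#include <map>
#include <string>
#include <utility>
#include <vector>

struct Rec {
  bool isDbg;            // models isa<DbgInfoIntrinsic>
  std::string var, expr; // models the (DILocalVariable, DIExpression) pair
  bool keep = true;
};

void dedupeRuns(std::vector<Rec> &insts) {
  std::map<std::pair<std::string, std::string>, Rec *> seen;
  for (Rec &r : insts) {
    if (!r.isDbg) {
      seen.clear(); // a real instruction ends the contiguous run
      continue;
    }
    Rec *&slot = seen[{r.var, r.expr}];
    if (slot)
      slot->keep = false; // earlier mapping in the run is redundant
    slot = &r;            // the latest mapping is the authentic one
  }
}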
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilTargetLowering.cpp
//===-- DxilTargetLowering.cpp - Implement the DxilTargetLowering class ---===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Empty implementation of TargetLoweringBase::InstructionOpcodeToISD and // TargetLoweringBase::getTypeLegalizationCost to make TargetTransformInfo // compile. // //===----------------------------------------------------------------------===// #include "llvm/Target/TargetLowering.h" using namespace llvm; //===----------------------------------------------------------------------===// // TargetTransformInfo Helpers //===----------------------------------------------------------------------===// int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const { return 0; } std::pair<unsigned, MVT> TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL, Type *Ty) const { EVT MTy = getValueType(DL, Ty); unsigned Cost = 1; return std::make_pair(Cost, MTy.getSimpleVT()); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMatrixBitcastLowerPass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLMatrixBitcastLowerPass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixLowerPass.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/Support/Global.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; using namespace hlsl::HLMatrixLower; // Matrix Bitcast lower. // After linking, lower matrix bitcast patterns like: // %169 = bitcast [72 x float]* %0 to [6 x %class.matrix.float.4.3]* // %conv.i = fptoui float %164 to i32 // %arrayidx.i = getelementptr inbounds [6 x %class.matrix.float.4.3], [6 x // %class.matrix.float.4.3]* %169, i32 0, i32 %conv.i %170 = bitcast // %class.matrix.float.4.3* %arrayidx.i to <12 x float>* namespace { // Translate matrix type to array type. Type *LowerMatrixTypeToOneDimArray(Type *Ty) { if (HLMatrixType MatTy = HLMatrixType::dyn_cast(Ty)) { Type *EltTy = MatTy.getElementTypeForReg(); return ArrayType::get(EltTy, MatTy.getNumElements()); } else { return Ty; } } Type *LowerMatrixArrayPointerToOneDimArray(Type *Ty) { unsigned addrSpace = Ty->getPointerAddressSpace(); Ty = Ty->getPointerElementType(); unsigned arraySize = 1; while (Ty->isArrayTy()) { arraySize *= Ty->getArrayNumElements(); Ty = Ty->getArrayElementType(); } HLMatrixType MatTy = HLMatrixType::cast(Ty); arraySize *= MatTy.getNumElements(); Ty = ArrayType::get(MatTy.getElementTypeForReg(), arraySize); return PointerType::get(Ty, addrSpace); } Type *TryLowerMatTy(Type *Ty) { Type *VecTy = nullptr; if (HLMatrixType::isMatrixArrayPtr(Ty)) { VecTy = LowerMatrixArrayPointerToOneDimArray(Ty); } else if (isa<PointerType>(Ty) && HLMatrixType::isa(Ty->getPointerElementType())) { VecTy = LowerMatrixTypeToOneDimArray(Ty->getPointerElementType()); VecTy = PointerType::get(VecTy, Ty->getPointerAddressSpace()); } return VecTy; } class MatrixBitcastLowerPass : public FunctionPass { public: static char ID; // Pass identification, replacement for typeid explicit MatrixBitcastLowerPass() : FunctionPass(ID) {} StringRef getPassName() const override { return "Matrix Bitcast lower"; } bool runOnFunction(Function &F) override { bool bUpdated = false; std::unordered_set<BitCastInst *> matCastSet; for (auto blkIt = F.begin(); blkIt != F.end(); ++blkIt) { BasicBlock *BB = blkIt; for (auto iIt = BB->begin(); iIt != BB->end();) { Instruction *I = (iIt++); if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) { // Mutate mat to vec. Type *ToTy = BCI->getType(); if (TryLowerMatTy(ToTy)) { matCastSet.insert(BCI); bUpdated = true; } } } } DxilModule &DM = F.getParent()->GetOrCreateDxilModule(); // Remove bitcasts which have CallInst users. if (DM.GetShaderModel()->IsLib()) { for (auto it = matCastSet.begin(); it != matCastSet.end();) { BitCastInst *BCI = *(it++); if (hasCallUser(BCI)) { matCastSet.erase(BCI); } } } // Lower matrix first. 
for (BitCastInst *BCI : matCastSet) { lowerMatrix(BCI, BCI->getOperand(0)); } return bUpdated; } private: void lowerMatrix(Instruction *M, Value *A); bool hasCallUser(Instruction *M); }; } // namespace bool MatrixBitcastLowerPass::hasCallUser(Instruction *M) { for (auto it = M->user_begin(); it != M->user_end();) { User *U = *(it++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { Type *EltTy = GEP->getType()->getPointerElementType(); if (HLMatrixType::isa(EltTy)) { if (hasCallUser(GEP)) return true; } else { DXASSERT(0, "invalid GEP for matrix"); } } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { if (hasCallUser(BCI)) return true; } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (isa<VectorType>(LI->getType())) { } else { DXASSERT(0, "invalid load for matrix"); } } else if (StoreInst *ST = dyn_cast<StoreInst>(U)) { Value *V = ST->getValueOperand(); if (isa<VectorType>(V->getType())) { } else { DXASSERT(0, "invalid store for matrix"); } } else if (isa<CallInst>(U)) { return true; } else { DXASSERT(0, "invalid use of matrix"); } } return false; } namespace { Value *CreateEltGEP(Value *A, unsigned i, Value *zeroIdx, IRBuilder<> &Builder) { Value *GEP = nullptr; if (GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(A)) { // A should be gep oneDimArray, 0, index * matSize // Here add eltIdx to index * matSize for each elt. Instruction *EltGEP = GEPA->clone(); unsigned eltIdx = EltGEP->getNumOperands() - 1; Value *NewIdx = Builder.CreateAdd(EltGEP->getOperand(eltIdx), Builder.getInt32(i)); EltGEP->setOperand(eltIdx, NewIdx); Builder.Insert(EltGEP); GEP = EltGEP; } else { GEP = Builder.CreateInBoundsGEP(A, {zeroIdx, Builder.getInt32(i)}); } return GEP; } } // namespace void MatrixBitcastLowerPass::lowerMatrix(Instruction *M, Value *A) { for (auto it = M->user_begin(); it != M->user_end();) { User *U = *(it++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { Type *EltTy = GEP->getType()->getPointerElementType(); if (HLMatrixType::isa(EltTy)) { // Change gep matrixArray, 0, index // into // gep oneDimArray, 0, index * matSize IRBuilder<> Builder(GEP); SmallVector<Value *, 2> idxList(GEP->idx_begin(), GEP->idx_end()); DXASSERT(idxList.size() == 2, "else not one dim matrix array index to matrix"); HLMatrixType MatTy = HLMatrixType::cast(EltTy); Value *matSize = Builder.getInt32(MatTy.getNumElements()); idxList.back() = Builder.CreateMul(idxList.back(), matSize); Value *NewGEP = Builder.CreateGEP(A, idxList); lowerMatrix(GEP, NewGEP); DXASSERT(GEP->user_empty(), "else lower matrix fail"); GEP->eraseFromParent(); } else { DXASSERT(0, "invalid GEP for matrix"); } } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { lowerMatrix(BCI, A); DXASSERT(BCI->user_empty(), "else lower matrix fail"); BCI->eraseFromParent(); } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (VectorType *Ty = dyn_cast<VectorType>(LI->getType())) { IRBuilder<> Builder(LI); Value *zeroIdx = Builder.getInt32(0); unsigned vecSize = Ty->getNumElements(); Value *NewVec = UndefValue::get(LI->getType()); for (unsigned i = 0; i < vecSize; i++) { Value *GEP = CreateEltGEP(A, i, zeroIdx, Builder); Value *Elt = Builder.CreateLoad(GEP); NewVec = Builder.CreateInsertElement(NewVec, Elt, i); } LI->replaceAllUsesWith(NewVec); LI->eraseFromParent(); } else { DXASSERT(0, "invalid load for matrix"); } } else if (StoreInst *ST = dyn_cast<StoreInst>(U)) { Value *V = ST->getValueOperand(); if (VectorType *Ty = dyn_cast<VectorType>(V->getType())) { IRBuilder<> Builder(ST); Value *zeroIdx = 
Builder.getInt32(0); unsigned vecSize = Ty->getNumElements(); for (unsigned i = 0; i < vecSize; i++) { Value *GEP = CreateEltGEP(A, i, zeroIdx, Builder); Value *Elt = Builder.CreateExtractElement(V, i); Builder.CreateStore(Elt, GEP); } ST->eraseFromParent(); } else { DXASSERT(0, "invalid store for matrix"); } } else { DXASSERT(0, "invalid use of matrix"); } } } char MatrixBitcastLowerPass::ID = 0; FunctionPass *llvm::createMatrixBitcastLowerPass() { return new MatrixBitcastLowerPass(); } INITIALIZE_PASS(MatrixBitcastLowerPass, "matrixbitcastlower", "Matrix Bitcast lower", false, false)
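The index arithmetic performed by lowerMatrix and CreateEltGEP reduces to flat offsets. A small stand-alone sketch (plain C++, hypothetical helper, not part of the pass) for the [72 x float] / [6 x float4x3] example in the header comment:

#include <cassert>

// Element e of matrix m in a [numMats x matrix] array viewed as one flat
// array: gep matrixArray, 0, m is rewritten to gep oneDimArray, 0, m * matSize,
// and CreateEltGEP then adds e on top of that scaled index.
unsigned flattenMatrixElemIndex(unsigned matIdx, unsigned eltIdx,
                                unsigned numMats, unsigned matSize) {
  assert(matIdx < numMats && eltIdx < matSize);
  return matIdx * matSize + eltIdx; // e.g. matrix 2, element 5 of a 4x3 -> 29
}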
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPrecisePropagatePass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilPrecisePropagatePass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/HLSL/ControlDependence.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; namespace { typedef std::unordered_set<Value *> ValueSet; struct FuncInfo { ControlDependence CtrlDep; std::unique_ptr<llvm::DominatorTreeBase<llvm::BasicBlock>> pPostDom; void Init(Function *F); void Clear(); }; typedef std::unordered_map<llvm::Function *, std::unique_ptr<FuncInfo>> FuncInfoMap; class DxilPrecisePropagatePass : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilPrecisePropagatePass() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Precise Propagate"; } bool runOnModule(Module &M) override { m_pDM = &(M.GetOrCreateDxilModule()); std::vector<Function *> deadList; for (Function &F : M.functions()) { if (HLModule::HasPreciseAttribute(&F)) { PropagatePreciseOnFunctionUser(F); deadList.emplace_back(&F); } } for (Function *F : deadList) F->eraseFromParent(); return true; } private: void PropagatePreciseOnFunctionUser(Function &F); void AddToWorkList(Value *V); void ProcessWorkList(); void Propagate(Instruction *I); void PropagateOnPointer(Value *Ptr); void PropagateOnPointerUsers(Value *Ptr); void PropagateThroughGEPs(Value *Ptr, ArrayRef<Value *> idxList, ValueSet &processedGEPs); void PropagateOnPointerUsedInCall(Value *Ptr, CallInst *CI); void PropagateCtrlDep(FuncInfo &FI, BasicBlock *BB); void PropagateCtrlDep(BasicBlock *BB); void PropagateCtrlDep(Instruction *I); // Add to m_ProcessedSet, return true if already in set. bool Processed(Value *V) { return !m_ProcessedSet.insert(V).second; } FuncInfo &GetFuncInfo(Function *F); DxilModule *m_pDM; std::vector<Value *> m_WorkList; ValueSet m_ProcessedSet; FuncInfoMap m_FuncInfo; }; char DxilPrecisePropagatePass::ID = 0; } // namespace void DxilPrecisePropagatePass::PropagatePreciseOnFunctionUser(Function &F) { for (auto U = F.user_begin(), E = F.user_end(); U != E;) { CallInst *CI = cast<CallInst>(*(U++)); Value *V = CI->getArgOperand(0); AddToWorkList(V); ProcessWorkList(); CI->eraseFromParent(); } } void DxilPrecisePropagatePass::AddToWorkList(Value *V) { // Skip values already marked. if (Processed(V)) return; m_WorkList.emplace_back(V); } void DxilPrecisePropagatePass::ProcessWorkList() { while (!m_WorkList.empty()) { Value *V = m_WorkList.back(); m_WorkList.pop_back(); if (V->getType()->isPointerTy()) { PropagateOnPointer(V); } Instruction *I = dyn_cast<Instruction>(V); if (!I) continue; // Set precise fast math on those instructions that support it. if (DxilModule::PreservesFastMathFlags(I)) DxilModule::SetPreciseFastMathFlags(I); // Fast math does not work on calls; use metadata instead. 
if (isa<FPMathOperator>(I) && isa<CallInst>(I)) HLModule::MarkPreciseAttributeWithMetadata(cast<CallInst>(I)); Propagate(I); PropagateCtrlDep(I); } } void DxilPrecisePropagatePass::Propagate(Instruction *I) { if (CallInst *CI = dyn_cast<CallInst>(I)) { for (unsigned i = 0; i < CI->getNumArgOperands(); i++) AddToWorkList(CI->getArgOperand(i)); } else { for (Value *src : I->operands()) AddToWorkList(src); } if (PHINode *Phi = dyn_cast<PHINode>(I)) { // Use pred for control dependence when constant (for now) FuncInfo &FI = GetFuncInfo(I->getParent()->getParent()); for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { if (isa<Constant>(Phi->getIncomingValue(i))) PropagateCtrlDep(FI, Phi->getIncomingBlock(i)); } } } // TODO: This could be a util function // TODO: Should this tunnel through addrspace cast? // And how could bitcast be handled? static Value *GetRootAndIndicesForGEP(GEPOperator *GEP, SmallVectorImpl<Value *> &idxList) { Value *Ptr = GEP; SmallVector<GEPOperator *, 4> GEPs; GEPs.emplace_back(GEP); while ((GEP = dyn_cast<GEPOperator>(Ptr = GEP->getPointerOperand()))) GEPs.emplace_back(GEP); while (!GEPs.empty()) { GEP = GEPs.back(); GEPs.pop_back(); auto idx = GEP->idx_begin(); idx++; while (idx != GEP->idx_end()) idxList.emplace_back(*(idx++)); } return Ptr; } void DxilPrecisePropagatePass::PropagateOnPointer(Value *Ptr) { PropagateOnPointerUsers(Ptr); // GetElementPointer gets special treatment since different GEPs may be used // at different points on the same root pointer to load or store data. We // need to find any stores that could have written data to the pointer we are // marking, so we need to search through all GEPs from the root pointer for // ones that may write to the same location. // // In addition, there may be multiple GEPs between the root pointer and loads // or stores, so we need to accumulate all the indices between the root and // the leaf pointer we are marking. // // Starting at the root pointer, we follow users, looking for GEPs with // indices that could "match", or calls that may write to the pointer along // the way. A "match" to the reference index is one that matches with constant // values, or if either index is non-constant, since the compiler doesn't know // what index may be read or written in that case. // // This still doesn't handle addrspace cast or bitcast, so propagation through // groupshared aggregates will not work, as one example. if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { // Get root Ptr, gather index list, and mark matching stores SmallVector<Value *, 8> idxList; Ptr = GetRootAndIndicesForGEP(GEP, idxList); ValueSet processedGEPs; PropagateThroughGEPs(Ptr, idxList, processedGEPs); } } void DxilPrecisePropagatePass::PropagateOnPointerUsers(Value *Ptr) { // Find all store and propagate on the val operand of store. // For CallInst, if Ptr is used as out parameter, mark it. 
for (User *U : Ptr->users()) { if (StoreInst *stInst = dyn_cast<StoreInst>(U)) { Value *val = stInst->getValueOperand(); AddToWorkList(val); } else if (CallInst *CI = dyn_cast<CallInst>(U)) { if (Function *F = CI->getCalledFunction()) { // Skip llvm intrinsics (debug/lifetime intrinsics) if (!F->isIntrinsic()) PropagateOnPointerUsedInCall(Ptr, CI); } } else if (isa<GEPOperator>(U) || isa<BitCastOperator>(U)) { PropagateOnPointerUsers(U); } } } void DxilPrecisePropagatePass::PropagateThroughGEPs(Value *Ptr, ArrayRef<Value *> idxList, ValueSet &processedGEPs) { // recurse to matching GEP users for (User *U : Ptr->users()) { if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { // skip visited GEPs // These are separate from processedSet because while we don't need to // visit an intermediate GEP multiple times while marking a single value // precise, we are not necessarily marking every value reachable from // the GEP as precise, so we may need to revisit when marking a different // value as precise. if (!processedGEPs.insert(GEP).second) continue; // Mismatch if both constant and unequal, otherwise be conservative. bool bMismatch = false; auto idx = GEP->idx_begin(); idx++; unsigned i = 0; // FIXME: When i points outside idxList, it's an indication that this GEP // is deeper than the one we are matching. This can happen with vector // components or aggregates when marking the aggregate precise, such as // when propagating through call with aggregate argument. This solution // only prevents OOB memory access, it does not fix the underlying // problems that lead to it, which will likely require significant work - // perhaps even a rewrite using alias analysis or some other more accurate // mechanism. while (idx != GEP->idx_end() && i < idxList.size()) { if (ConstantInt *C = dyn_cast<ConstantInt>(*idx)) { if (ConstantInt *CRef = dyn_cast<ConstantInt>(idxList[i])) { if (CRef->getLimitedValue() != C->getLimitedValue()) { bMismatch = true; break; } } } idx++; i++; } if (bMismatch) continue; if ((unsigned)idxList.size() == i) { // Mark leaf users if (Processed(GEP)) continue; PropagateOnPointerUsers(GEP); } else { // Recurse GEP users PropagateThroughGEPs( GEP, ArrayRef<Value *>(idxList.data() + i, idxList.end()), processedGEPs); } } else if (CallInst *CI = dyn_cast<CallInst>(U)) { // Root pointer or intermediate GEP used in call. // If it may write to the pointer, we must mark the call and recurse // arguments. // This also widens the precise propagation to the entire aggregate // pointed to by the root ptr or intermediate GEP. PropagateOnPointerUsedInCall(Ptr, CI); } } } void DxilPrecisePropagatePass::PropagateOnPointerUsedInCall(Value *Ptr, CallInst *CI) { bool bReadOnly = true; Function *F = CI->getCalledFunction(); // skip starting points (dx.attribute.precise calls) if (HLModule::HasPreciseAttribute(F)) return; const DxilFunctionAnnotation *funcAnnotation = m_pDM->GetTypeSystem().GetFunctionAnnotation(F); if (funcAnnotation) { for (unsigned i = 0; i < CI->getNumArgOperands(); ++i) { if (Ptr != CI->getArgOperand(i)) continue; const DxilParameterAnnotation &paramAnnotation = funcAnnotation->GetParameterAnnotation(i); // OutputPatch and OutputStream will be checked after scalar repl. 
// Here only check out/inout if (paramAnnotation.GetParamInputQual() == DxilParamInputQual::Out || paramAnnotation.GetParamInputQual() == DxilParamInputQual::Inout) { bReadOnly = false; break; } } } else { bReadOnly = false; } if (!bReadOnly) { AddToWorkList(CI); } } void FuncInfo::Init(Function *F) { if (!pPostDom) { pPostDom = make_unique<DominatorTreeBase<BasicBlock>>(true); pPostDom->recalculate(*F); CtrlDep.Compute(F, *pPostDom); } } void FuncInfo::Clear() { CtrlDep.Clear(); pPostDom.reset(); } FuncInfo &DxilPrecisePropagatePass::GetFuncInfo(Function *F) { auto &FI = m_FuncInfo[F]; if (!FI) { FI = make_unique<FuncInfo>(); FI->Init(F); } return *FI.get(); } void DxilPrecisePropagatePass::PropagateCtrlDep(FuncInfo &FI, BasicBlock *BB) { if (Processed(BB)) return; const BasicBlockSet &CtrlDepSet = FI.CtrlDep.GetCDBlocks(BB); for (BasicBlock *B : CtrlDepSet) { AddToWorkList(B->getTerminator()); } } void DxilPrecisePropagatePass::PropagateCtrlDep(BasicBlock *BB) { FuncInfo &FI = GetFuncInfo(BB->getParent()); PropagateCtrlDep(FI, BB); } void DxilPrecisePropagatePass::PropagateCtrlDep(Instruction *I) { PropagateCtrlDep(I->getParent()); } ModulePass *llvm::createDxilPrecisePropagatePass() { return new DxilPrecisePropagatePass(); } INITIALIZE_PASS(DxilPrecisePropagatePass, "hlsl-dxil-precise", "DXIL precise attribute propagate", false, false)
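At its core the pass is a transitive worklist walk. The following self-contained sketch (hypothetical Node graph; it deliberately ignores pointers, calls, and control dependence) shows the marking scheme shared by AddToWorkList and ProcessWorkList: each value is visited at most once and its operands are queued in turn.

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> operands;
  bool precise = false;
};

void propagatePrecise(const std::vector<Node *> &seeds) {
  std::unordered_set<Node *> processed; // plays the role of m_ProcessedSet
  std::vector<Node *> worklist(seeds.begin(), seeds.end());
  while (!worklist.empty()) {
    Node *n = worklist.back();
    worklist.pop_back();
    if (!processed.insert(n).second)
      continue;                 // already marked
    n->precise = true;          // i.e. disable fast-math on this value
    for (Node *op : n->operands)
      worklist.push_back(op);   // operands must be computed exactly too
  }
}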
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilTargetTransformInfo.cpp
//===-- DxilTargetTransformInfo.cpp - DXIL specific TTI pass ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // \file // This file implements a TargetTransformInfo analysis pass specific to // DXIL. Only isSourceOfDivergence is implemented, for DivergenceAnalysis. // //===----------------------------------------------------------------------===// #include "DxilTargetTransformInfo.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "llvm/CodeGen/BasicTTIImpl.h" using namespace llvm; using namespace hlsl; #define DEBUG_TYPE "DXILtti" // For BasicTTIImpl cl::opt<unsigned> llvm::PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0), cl::desc("Threshold for partial unrolling"), cl::Hidden); DxilTTIImpl::DxilTTIImpl(const TargetMachine *TM, const Function &F, hlsl::DxilModule &DM, bool ThreadGroup) : BaseT(TM, F.getParent()->getDataLayout()), m_pHlslOP(DM.GetOP()), m_isThreadGroup(ThreadGroup) {} namespace { bool IsDxilOpSourceOfDivergence(const CallInst *CI, OP *hlslOP, bool ThreadGroup) { DXIL::OpCode opcode = hlslOP->GetDxilOpFuncCallInst(CI); switch (opcode) { case DXIL::OpCode::AtomicBinOp: case DXIL::OpCode::AtomicCompareExchange: case DXIL::OpCode::LoadInput: case DXIL::OpCode::BufferUpdateCounter: case DXIL::OpCode::CycleCounterLegacy: case DXIL::OpCode::DomainLocation: case DXIL::OpCode::Coverage: case DXIL::OpCode::EvalCentroid: case DXIL::OpCode::EvalSampleIndex: case DXIL::OpCode::EvalSnapped: case DXIL::OpCode::FlattenedThreadIdInGroup: case DXIL::OpCode::GSInstanceID: case DXIL::OpCode::InnerCoverage: case DXIL::OpCode::LoadOutputControlPoint: case DXIL::OpCode::LoadPatchConstant: case DXIL::OpCode::OutputControlPointID: case DXIL::OpCode::PrimitiveID: case DXIL::OpCode::RenderTargetGetSampleCount: case DXIL::OpCode::RenderTargetGetSamplePosition: case DXIL::OpCode::ThreadId: case DXIL::OpCode::ThreadIdInGroup: return true; case DXIL::OpCode::GroupId: return !ThreadGroup; default: return false; } } } // namespace /// /// \returns true if the result of the value could potentially be /// different across dispatch or thread group. bool DxilTTIImpl::isSourceOfDivergence(const Value *V) const { if (dyn_cast<Argument>(V)) return true; // Atomics are divergent because they are executed sequentially: when an // atomic operation refers to the same address in each thread, then each // thread after the first sees the value written by the previous thread as // the original value. if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V)) return true; if (const CallInst *CI = dyn_cast<CallInst>(V)) { // Assume non-DXIL-intrinsic function calls are a source of divergence. if (!m_pHlslOP->IsDxilOpFuncCallInst(CI)) return true; return IsDxilOpSourceOfDivergence(CI, m_pHlslOP, m_isThreadGroup); } return false; }
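The interesting nuance above is the ThreadGroup flag: GroupId is divergent across a full dispatch but uniform inside one thread group. A tiny hypothetical sketch of that scoped classification (the enums are illustrative stand-ins, not the DXIL opcode table):

enum class Scope { Dispatch, ThreadGroup };
enum class Op { ThreadId, ThreadIdInGroup, GroupId, Other };

bool isDivergent(Op op, Scope scope) {
  switch (op) {
  case Op::ThreadId:
  case Op::ThreadIdInGroup:
    return true; // differs between threads even within one group
  case Op::GroupId:
    return scope == Scope::Dispatch; // uniform inside a single group
  default:
    return false; // treat everything else as uniform here
  }
}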
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLOperations.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLOperations.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Implementation of DXIL operations. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Support/raw_ostream.h" using namespace hlsl; using namespace llvm; namespace hlsl { const char HLPrefixStr[] = "dx.hl"; const char *const HLPrefix = HLPrefixStr; static const char HLLowerStrategyStr[] = "dx.hlls"; static const char *const HLLowerStrategy = HLLowerStrategyStr; static const char HLWaveSensitiveStr[] = "dx.wave-sensitive"; static const char *const HLWaveSensitive = HLWaveSensitiveStr; static StringRef HLOpcodeGroupNames[]{ "notHLDXIL", // NotHL, "<ext>", // HLExtIntrinsic - should always refer through extension "op", // HLIntrinsic, "cast", // HLCast, "init", // HLInit, "binop", // HLBinOp, "unop", // HLUnOp, "subscript", // HLSubscript, "matldst", // HLMatLoadStore, "select", // HLSelect, "createhandle", // HLCreateHandle, "createnodeoutputhandle", // HLCreateNodeOutputHandle "indexnodehandle", // HLIndexNodeHandle "createnodeinputrecordhandle", // HLCreateNodeInputRecordHandle "annotatehandle", // HLAnnotateHandle, "annotatenodehandle", // HLAnnotateNodeHandle "annotatenoderecordhandle", // HLAnnotateNodeRecordHandle "numOfHLDXIL", // NumOfHLOps }; static_assert(_countof(HLOpcodeGroupNames) == 1 + (size_t)HLOpcodeGroup::NumOfHLOps, "otherwise, tables out of sync"); static StringRef HLOpcodeGroupFullNames[]{ "notHLDXIL", // NotHL, "<ext>", // HLExtIntrinsic - should always refer through extension "dx.hl.op", // HLIntrinsic, "dx.hl.cast", // HLCast, "dx.hl.init", // HLInit, "dx.hl.binop", // HLBinOp, "dx.hl.unop", // HLUnOp, "dx.hl.subscript", // HLSubscript, "dx.hl.matldst", // HLMatLoadStore, "dx.hl.select", // HLSelect, "dx.hl.createhandle", // HLCreateHandle, "dx.hl.createnodeoutputhandle", // HLCreateNodeOutputHandle "dx.hl.indexnodehandle", // HLIndexNodeHandle "dx.hl.createnodeinputrecordhandle", // HLCreateNodeInputRecordHandle "dx.hl.annotatehandle", // HLAnnotateHandle, "dx.hl.annotatenodehandle", // HLAnnotateNodeHandle, "dx.hl.annotatenoderecordhandle", // HLAnnotateNodeRecordHandle "numOfHLDXIL", // NumOfHLOps }; static_assert(_countof(HLOpcodeGroupFullNames) == 1 + (size_t)HLOpcodeGroup::NumOfHLOps, "otherwise, tables out of sync"); static HLOpcodeGroup GetHLOpcodeGroupInternal(StringRef group) { return llvm::StringSwitch<HLOpcodeGroup>(group) .Case("op", HLOpcodeGroup::HLIntrinsic) .Case("cast", HLOpcodeGroup::HLCast) .Case("init", HLOpcodeGroup::HLInit) .Case("binop", HLOpcodeGroup::HLBinOp) .Case("unop", HLOpcodeGroup::HLUnOp) .Case("subscript", HLOpcodeGroup::HLSubscript) .Case("matldst", HLOpcodeGroup::HLMatLoadStore) .Case("select", HLOpcodeGroup::HLSelect) .Case("createhandle", HLOpcodeGroup::HLCreateHandle) .Case("createnodeoutputhandle", HLOpcodeGroup::HLCreateNodeOutputHandle) .Case("indexnodehandle", HLOpcodeGroup::HLIndexNodeHandle) .Case("createnodeinputrecordhandle", HLOpcodeGroup::HLCreateNodeInputRecordHandle) .Case("annotatehandle", HLOpcodeGroup::HLAnnotateHandle) .Case("annotatenodehandle", 
HLOpcodeGroup::HLAnnotateNodeHandle) .Case("annotatenoderecordhandle", HLOpcodeGroup::HLAnnotateNodeRecordHandle) .Default(HLOpcodeGroup::NotHL); } // Get HL opcode group by function name. HLOpcodeGroup GetHLOpcodeGroupByName(const Function *F) { StringRef name = F->getName(); if (!name.startswith(HLPrefix)) { // This could be an external intrinsic, but this function // won't recognize those as such. Use GetHLOpcodeGroup // to make that distinction. return HLOpcodeGroup::NotHL; } const unsigned prefixSize = sizeof(HLPrefixStr); const unsigned groupEnd = name.find_first_of('.', prefixSize); StringRef group = name.substr(prefixSize, groupEnd - prefixSize); return GetHLOpcodeGroupInternal(group); } HLOpcodeGroup GetHLOpcodeGroup(llvm::Function *F) { llvm::StringRef name = GetHLOpcodeGroupNameByAttr(F); HLOpcodeGroup result = GetHLOpcodeGroupInternal(name); if (result == HLOpcodeGroup::NotHL) { result = name.empty() ? result : HLOpcodeGroup::HLExtIntrinsic; } if (result == HLOpcodeGroup::NotHL) { result = GetHLOpcodeGroupByName(F); } return result; } llvm::StringRef GetHLOpcodeGroupNameByAttr(llvm::Function *F) { Attribute groupAttr = F->getFnAttribute(hlsl::HLPrefix); StringRef group = groupAttr.getValueAsString(); return group; } StringRef GetHLOpcodeGroupName(HLOpcodeGroup op) { switch (op) { case HLOpcodeGroup::HLCast: case HLOpcodeGroup::HLInit: case HLOpcodeGroup::HLBinOp: case HLOpcodeGroup::HLUnOp: case HLOpcodeGroup::HLIntrinsic: case HLOpcodeGroup::HLSubscript: case HLOpcodeGroup::HLMatLoadStore: case HLOpcodeGroup::HLSelect: case HLOpcodeGroup::HLCreateHandle: case HLOpcodeGroup::HLCreateNodeOutputHandle: case HLOpcodeGroup::HLIndexNodeHandle: case HLOpcodeGroup::HLCreateNodeInputRecordHandle: case HLOpcodeGroup::HLAnnotateHandle: case HLOpcodeGroup::HLAnnotateNodeHandle: case HLOpcodeGroup::HLAnnotateNodeRecordHandle: return HLOpcodeGroupNames[static_cast<unsigned>(op)]; default: llvm_unreachable("invalid op"); return ""; } } StringRef GetHLOpcodeGroupFullName(HLOpcodeGroup op) { switch (op) { case HLOpcodeGroup::HLCast: case HLOpcodeGroup::HLInit: case HLOpcodeGroup::HLBinOp: case HLOpcodeGroup::HLUnOp: case HLOpcodeGroup::HLIntrinsic: case HLOpcodeGroup::HLSubscript: case HLOpcodeGroup::HLMatLoadStore: case HLOpcodeGroup::HLSelect: case HLOpcodeGroup::HLCreateHandle: case HLOpcodeGroup::HLCreateNodeOutputHandle: case HLOpcodeGroup::HLIndexNodeHandle: case HLOpcodeGroup::HLCreateNodeInputRecordHandle: case HLOpcodeGroup::HLAnnotateHandle: case HLOpcodeGroup::HLAnnotateNodeHandle: case HLOpcodeGroup::HLAnnotateNodeRecordHandle: return HLOpcodeGroupFullNames[static_cast<unsigned>(op)]; default: llvm_unreachable("invalid op"); return ""; } } llvm::StringRef GetHLOpcodeName(HLUnaryOpcode Op) { switch (Op) { case HLUnaryOpcode::PostInc: return "++"; case HLUnaryOpcode::PostDec: return "--"; case HLUnaryOpcode::PreInc: return "++"; case HLUnaryOpcode::PreDec: return "--"; case HLUnaryOpcode::Plus: return "+"; case HLUnaryOpcode::Minus: return "-"; case HLUnaryOpcode::Not: return "~"; case HLUnaryOpcode::LNot: return "!"; case HLUnaryOpcode::Invalid: case HLUnaryOpcode::NumOfUO: // Invalid Unary Ops break; } llvm_unreachable("Unknown unary operator"); } llvm::StringRef GetHLOpcodeName(HLBinaryOpcode Op) { switch (Op) { case HLBinaryOpcode::Mul: return "*"; case HLBinaryOpcode::UDiv: case HLBinaryOpcode::Div: return "/"; case HLBinaryOpcode::URem: case HLBinaryOpcode::Rem: return "%"; case HLBinaryOpcode::Add: return "+"; case HLBinaryOpcode::Sub: return "-"; case HLBinaryOpcode::Shl: 
return "<<"; case HLBinaryOpcode::UShr: case HLBinaryOpcode::Shr: return ">>"; case HLBinaryOpcode::ULT: case HLBinaryOpcode::LT: return "<"; case HLBinaryOpcode::UGT: case HLBinaryOpcode::GT: return ">"; case HLBinaryOpcode::ULE: case HLBinaryOpcode::LE: return "<="; case HLBinaryOpcode::UGE: case HLBinaryOpcode::GE: return ">="; case HLBinaryOpcode::EQ: return "=="; case HLBinaryOpcode::NE: return "!="; case HLBinaryOpcode::And: return "&"; case HLBinaryOpcode::Xor: return "^"; case HLBinaryOpcode::Or: return "|"; case HLBinaryOpcode::LAnd: return "&&"; case HLBinaryOpcode::LOr: return "||"; case HLBinaryOpcode::Invalid: case HLBinaryOpcode::NumOfBO: // Invalid Binary Ops break; } llvm_unreachable("Invalid OpCode!"); } llvm::StringRef GetHLOpcodeName(HLSubscriptOpcode Op) { switch (Op) { case HLSubscriptOpcode::DefaultSubscript: return "[]"; case HLSubscriptOpcode::ColMatSubscript: return "colMajor[]"; case HLSubscriptOpcode::RowMatSubscript: return "rowMajor[]"; case HLSubscriptOpcode::ColMatElement: return "colMajor_m"; case HLSubscriptOpcode::RowMatElement: return "rowMajor_m"; case HLSubscriptOpcode::DoubleSubscript: return "[][]"; case HLSubscriptOpcode::CBufferSubscript: return "cb"; case HLSubscriptOpcode::VectorSubscript: return "vector[]"; } return ""; } llvm::StringRef GetHLOpcodeName(HLCastOpcode Op) { switch (Op) { case HLCastOpcode::DefaultCast: return "default"; case HLCastOpcode::ToUnsignedCast: return "toUnsigned"; case HLCastOpcode::FromUnsignedCast: return "fromUnsigned"; case HLCastOpcode::UnsignedUnsignedCast: return "unsignedUnsigned"; case HLCastOpcode::ColMatrixToVecCast: return "colMatToVec"; case HLCastOpcode::RowMatrixToVecCast: return "rowMatToVec"; case HLCastOpcode::ColMatrixToRowMatrix: return "colMatToRowMat"; case HLCastOpcode::RowMatrixToColMatrix: return "rowMatToColMat"; case HLCastOpcode::HandleToResCast: return "handleToRes"; } return ""; } llvm::StringRef GetHLOpcodeName(HLMatLoadStoreOpcode Op) { switch (Op) { case HLMatLoadStoreOpcode::ColMatLoad: return "colLoad"; case HLMatLoadStoreOpcode::ColMatStore: return "colStore"; case HLMatLoadStoreOpcode::RowMatLoad: return "rowLoad"; case HLMatLoadStoreOpcode::RowMatStore: return "rowStore"; } llvm_unreachable("invalid matrix load store operator"); } StringRef GetHLLowerStrategy(Function *F) { llvm::Attribute A = F->getFnAttribute(HLLowerStrategy); llvm::StringRef LowerStrategy = A.getValueAsString(); return LowerStrategy; } void SetHLLowerStrategy(Function *F, StringRef S) { F->addFnAttr(HLLowerStrategy, S); } // Set function attribute indicating wave-sensitivity void SetHLWaveSensitive(Function *F) { F->addFnAttr(HLWaveSensitive, "y"); } // Return if this Function is dependent on other wave members indicated by // attribute bool IsHLWaveSensitive(Function *F) { AttributeSet attrSet = F->getAttributes(); return attrSet.hasAttribute(AttributeSet::FunctionIndex, HLWaveSensitive); } static std::string GetHLFunctionAttributeMangling(const AttributeSet &attribs); std::string GetHLFullName(HLOpcodeGroup op, unsigned opcode, const AttributeSet &attribs = AttributeSet()) { assert(op != HLOpcodeGroup::HLExtIntrinsic && "else table name should be used"); std::string opName = GetHLOpcodeGroupFullName(op).str() + "."; switch (op) { case HLOpcodeGroup::HLBinOp: { HLBinaryOpcode binOp = static_cast<HLBinaryOpcode>(opcode); return opName + GetHLOpcodeName(binOp).str(); } case HLOpcodeGroup::HLUnOp: { HLUnaryOpcode unOp = static_cast<HLUnaryOpcode>(opcode); return opName + GetHLOpcodeName(unOp).str(); } case 
HLOpcodeGroup::HLIntrinsic: { // intrinsics with the same signature will share the function now // The opcode is in arg0. return opName + GetHLFunctionAttributeMangling(attribs); } case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); return opName + GetHLOpcodeName(matOp).str(); } case HLOpcodeGroup::HLSubscript: { HLSubscriptOpcode subOp = static_cast<HLSubscriptOpcode>(opcode); return opName + GetHLOpcodeName(subOp).str() + "." + GetHLFunctionAttributeMangling(attribs); } case HLOpcodeGroup::HLCast: { HLCastOpcode castOp = static_cast<HLCastOpcode>(opcode); return opName + GetHLOpcodeName(castOp).str(); } case HLOpcodeGroup::HLCreateHandle: case HLOpcodeGroup::HLAnnotateHandle: return opName; default: return opName + GetHLFunctionAttributeMangling(attribs); } } // Get opcode from arg0 of function call. unsigned GetHLOpcode(const CallInst *CI) { Value *idArg = CI->getArgOperand(HLOperandIndex::kOpcodeIdx); Constant *idConst = cast<Constant>(idArg); return idConst->getUniqueInteger().getLimitedValue(); } unsigned GetRowMajorOpcode(HLOpcodeGroup group, unsigned opcode) { switch (group) { case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); switch (matOp) { case HLMatLoadStoreOpcode::ColMatLoad: return static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatLoad); case HLMatLoadStoreOpcode::ColMatStore: return static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatStore); default: return opcode; } } break; case HLOpcodeGroup::HLSubscript: { HLSubscriptOpcode subOp = static_cast<HLSubscriptOpcode>(opcode); switch (subOp) { case HLSubscriptOpcode::ColMatElement: return static_cast<unsigned>(HLSubscriptOpcode::RowMatElement); case HLSubscriptOpcode::ColMatSubscript: return static_cast<unsigned>(HLSubscriptOpcode::RowMatSubscript); default: return opcode; } } break; default: return opcode; } } unsigned GetUnsignedOpcode(unsigned opcode) { return GetUnsignedIntrinsicOpcode(static_cast<IntrinsicOp>(opcode)); } // For HLBinaryOpcode bool HasUnsignedOpcode(HLBinaryOpcode opcode) { switch (opcode) { case HLBinaryOpcode::Div: case HLBinaryOpcode::Rem: case HLBinaryOpcode::Shr: case HLBinaryOpcode::LT: case HLBinaryOpcode::GT: case HLBinaryOpcode::LE: case HLBinaryOpcode::GE: return true; default: return false; } } HLBinaryOpcode GetUnsignedOpcode(HLBinaryOpcode opcode) { switch (opcode) { case HLBinaryOpcode::Div: return HLBinaryOpcode::UDiv; case HLBinaryOpcode::Rem: return HLBinaryOpcode::URem; case HLBinaryOpcode::Shr: return HLBinaryOpcode::UShr; case HLBinaryOpcode::LT: return HLBinaryOpcode::ULT; case HLBinaryOpcode::GT: return HLBinaryOpcode::UGT; case HLBinaryOpcode::LE: return HLBinaryOpcode::ULE; case HLBinaryOpcode::GE: return HLBinaryOpcode::UGE; default: return opcode; } } static AttributeSet GetHLFunctionAttributes(LLVMContext &C, FunctionType *funcTy, const AttributeSet &origAttribs, HLOpcodeGroup group, unsigned opcode) { // Always add nounwind AttributeSet attribs = AttributeSet::get(C, AttributeSet::FunctionIndex, ArrayRef<Attribute::AttrKind>({Attribute::NoUnwind})); auto addAttr = [&](Attribute::AttrKind Attr) { if (!attribs.hasAttribute(AttributeSet::FunctionIndex, Attr)) attribs = attribs.addAttribute(C, AttributeSet::FunctionIndex, Attr); }; auto copyAttr = [&](Attribute::AttrKind Attr) { if (origAttribs.hasAttribute(AttributeSet::FunctionIndex, Attr)) addAttr(Attr); }; auto copyStrAttr = [&](StringRef Kind) { if (origAttribs.hasAttribute(AttributeSet::FunctionIndex, Kind)) attribs 
= attribs.addAttribute( C, AttributeSet::FunctionIndex, Kind, origAttribs.getAttribute(AttributeSet::FunctionIndex, Kind) .getValueAsString()); }; // Copy attributes we preserve from the original function. copyAttr(Attribute::ReadOnly); copyAttr(Attribute::ReadNone); copyStrAttr(HLWaveSensitive); switch (group) { case HLOpcodeGroup::HLUnOp: case HLOpcodeGroup::HLBinOp: case HLOpcodeGroup::HLCast: case HLOpcodeGroup::HLSubscript: addAttr(Attribute::ReadNone); break; case HLOpcodeGroup::HLInit: if (!funcTy->getReturnType()->isVoidTy()) { addAttr(Attribute::ReadNone); } break; case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); if (matOp == HLMatLoadStoreOpcode::ColMatLoad || matOp == HLMatLoadStoreOpcode::RowMatLoad) addAttr(Attribute::ReadOnly); } break; case HLOpcodeGroup::HLCreateHandle: { addAttr(Attribute::ReadNone); } break; case HLOpcodeGroup::HLAnnotateHandle: { addAttr(Attribute::ReadNone); } break; case HLOpcodeGroup::HLIntrinsic: { IntrinsicOp intrinsicOp = static_cast<IntrinsicOp>(opcode); switch (intrinsicOp) { default: break; case IntrinsicOp::IOP_DeviceMemoryBarrierWithGroupSync: case IntrinsicOp::IOP_DeviceMemoryBarrier: case IntrinsicOp::IOP_GroupMemoryBarrierWithGroupSync: case IntrinsicOp::IOP_GroupMemoryBarrier: case IntrinsicOp::IOP_AllMemoryBarrierWithGroupSync: case IntrinsicOp::IOP_AllMemoryBarrier: addAttr(Attribute::NoDuplicate); break; } } break; case HLOpcodeGroup::NotHL: case HLOpcodeGroup::HLExtIntrinsic: case HLOpcodeGroup::HLSelect: case HLOpcodeGroup::NumOfHLOps: // No default attributes for these opcodes. break; } assert(!(attribs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && attribs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)) && "conflicting ReadNone and ReadOnly attributes"); return attribs; } static std::string GetHLFunctionAttributeMangling(const AttributeSet &attribs) { std::string mangledName; raw_string_ostream mangledNameStr(mangledName); // Capture for adding in canonical order later. bool ReadNone = false; bool ReadOnly = false; bool ArgMemOnly = false; bool NoDuplicate = false; bool WaveSensitive = false; // Ensure every function attribute is recognized. for (unsigned Slot = 0; Slot < attribs.getNumSlots(); Slot++) { if (attribs.getSlotIndex(Slot) == AttributeSet::FunctionIndex) { for (auto it = attribs.begin(Slot), e = attribs.end(Slot); it != e; it++) { if (it->isEnumAttribute()) { switch (it->getKindAsEnum()) { case Attribute::ReadNone: ReadNone = true; break; case Attribute::ReadOnly: ReadOnly = true; break; case Attribute::ArgMemOnly: ArgMemOnly = true; break; case Attribute::NoDuplicate: NoDuplicate = true; break; case Attribute::NoUnwind: // All intrinsics have this attribute, so mangling is unaffected. break; default: assert(false && "unexpected attribute for HLOperation"); } } else if (it->isStringAttribute()) { StringRef Kind = it->getKindAsString(); if (Kind == HLWaveSensitive) { assert(it->getValueAsString() == "y" && "otherwise, unexpected value for WaveSensitive attribute"); WaveSensitive = true; } else { assert(Kind == "dx.hlls" && "unexpected string function attribute for HLOperation"); } } } } } // Validate attribute combinations. 
assert(!(ReadNone && ReadOnly && ArgMemOnly) && "ReadNone, ReadOnly, and ArgMemOnly are mutually exclusive"); // Add mangling in canonical order if (NoDuplicate) mangledNameStr << "nd"; if (ReadNone) mangledNameStr << "rn"; if (ReadOnly) mangledNameStr << "ro"; if (WaveSensitive) mangledNameStr << "wave"; return mangledName; } Function *GetOrCreateHLFunction(Module &M, FunctionType *funcTy, HLOpcodeGroup group, unsigned opcode) { AttributeSet attribs; return GetOrCreateHLFunction(M, funcTy, group, nullptr, nullptr, opcode, attribs); } Function *GetOrCreateHLFunction(Module &M, FunctionType *funcTy, HLOpcodeGroup group, StringRef *groupName, StringRef *fnName, unsigned opcode) { AttributeSet attribs; return GetOrCreateHLFunction(M, funcTy, group, groupName, fnName, opcode, attribs); } Function *GetOrCreateHLFunction(Module &M, FunctionType *funcTy, HLOpcodeGroup group, unsigned opcode, const AttributeSet &attribs) { return GetOrCreateHLFunction(M, funcTy, group, nullptr, nullptr, opcode, attribs); } Function *GetOrCreateHLFunction(Module &M, FunctionType *funcTy, HLOpcodeGroup group, StringRef *groupName, StringRef *fnName, unsigned opcode, const AttributeSet &origAttribs) { // Set/transfer all common attributes AttributeSet attribs = GetHLFunctionAttributes(M.getContext(), funcTy, origAttribs, group, opcode); std::string mangledName; raw_string_ostream mangledNameStr(mangledName); if (group == HLOpcodeGroup::HLExtIntrinsic) { assert(groupName && "else intrinsic should have been rejected"); assert(fnName && "else intrinsic should have been rejected"); mangledNameStr << *groupName; mangledNameStr << '.'; mangledNameStr << *fnName; attribs = attribs.addAttribute(M.getContext(), AttributeSet::FunctionIndex, hlsl::HLPrefix, *groupName); } else { mangledNameStr << GetHLFullName(group, opcode, attribs); mangledNameStr << '.'; funcTy->print(mangledNameStr); } mangledNameStr.flush(); // Avoid getOrInsertFunction to verify attributes and type without casting. Function *F = cast_or_null<Function>(M.getNamedValue(mangledName)); if (F) { assert(F->getFunctionType() == funcTy && "otherwise, function type mismatch not captured by mangling"); // Compare attribute mangling to ensure function attributes are as expected. assert( GetHLFunctionAttributeMangling(F->getAttributes().getFnAttributes()) == GetHLFunctionAttributeMangling(attribs) && "otherwise, function attribute mismatch not captured by mangling"); } else { F = cast<Function>(M.getOrInsertFunction(mangledName, funcTy, attribs)); } return F; } // HLFunction with body cannot share with HLFunction without body. // So need add name. Function *GetOrCreateHLFunctionWithBody(Module &M, FunctionType *funcTy, HLOpcodeGroup group, unsigned opcode, StringRef name) { // Set/transfer all common attributes AttributeSet attribs = GetHLFunctionAttributes(M.getContext(), funcTy, AttributeSet(), group, opcode); std::string operatorName = GetHLFullName(group, opcode, attribs); std::string mangledName = operatorName + "." 
+ name.str(); raw_string_ostream mangledNameStr(mangledName); funcTy->print(mangledNameStr); mangledNameStr.flush(); Function *F = cast<Function>(M.getOrInsertFunction(mangledName, funcTy, attribs)); F->setLinkage(llvm::GlobalValue::LinkageTypes::InternalLinkage); return F; } Value *callHLFunction(Module &Module, HLOpcodeGroup OpcodeGroup, unsigned Opcode, Type *RetTy, ArrayRef<Value *> Args, IRBuilder<> &Builder) { AttributeSet attribs; return callHLFunction(Module, OpcodeGroup, Opcode, RetTy, Args, attribs, Builder); } Value *callHLFunction(Module &Module, HLOpcodeGroup OpcodeGroup, unsigned Opcode, Type *RetTy, ArrayRef<Value *> Args, const AttributeSet &attribs, IRBuilder<> &Builder) { SmallVector<Type *, 4> ArgTys; ArgTys.reserve(Args.size()); for (Value *Arg : Args) ArgTys.emplace_back(Arg->getType()); FunctionType *FuncTy = FunctionType::get(RetTy, ArgTys, /* isVarArg */ false); Function *Func = GetOrCreateHLFunction(Module, FuncTy, OpcodeGroup, Opcode, attribs); return Builder.CreateCall(Func, Args); } } // namespace hlsl
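GetHLFunctionAttributeMangling emits the attribute letters in a fixed canonical order so that equal attribute sets always produce equal names. A stand-alone sketch of just that ordering (plain C++; the booleans stand in for the parsed AttributeSet): a ReadNone wave-sensitive intrinsic would mangle to "rnwave", giving a full name like dx.hl.op.rnwave.<signature>.

#include <string>

std::string mangleAttrs(bool noDuplicate, bool readNone, bool readOnly,
                        bool waveSensitive) {
  std::string m;
  // Canonical order: nd, rn, ro, wave (matches the source above).
  if (noDuplicate)
    m += "nd";
  if (readNone)
    m += "rn";
  if (readOnly)
    m += "ro";
  if (waveSensitive)
    m += "wave";
  return m;
}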
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilEliminateOutputDynamicIndexing.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilEliminateOutputDynamicIndexing.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Eliminate dynamic indexing on output. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilSignatureElement.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/Support/Global.h" #include "llvm/ADT/MapVector.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" using namespace llvm; using namespace hlsl; namespace { class DxilEliminateOutputDynamicIndexing : public ModulePass { private: public: static char ID; // Pass identification, replacement for typeid explicit DxilEliminateOutputDynamicIndexing() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL eliminate output dynamic indexing"; } bool runOnModule(Module &M) override { DxilModule &DM = M.GetOrCreateDxilModule(); bool bUpdated = false; if (DM.GetShaderModel()->IsHS()) { // HS writes outputs into shared memory, so dynamic indexing is OK. return bUpdated; } // Skip pass thru entry. if (!DM.GetEntryFunction()) return bUpdated; hlsl::OP *hlslOP = DM.GetOP(); bUpdated |= EliminateDynamicOutput(hlslOP, DXIL::OpCode::StoreOutput, DM.GetOutputSignature(), DM.GetEntryFunction()); return bUpdated; } private: bool EliminateDynamicOutput(hlsl::OP *hlslOP, DXIL::OpCode opcode, DxilSignature &outputSig, Function *Entry); void ReplaceDynamicOutput(ArrayRef<Value *> tmpSigElts, Value *sigID, Value *zero, Function *F); void StoreTmpSigToOutput(ArrayRef<Value *> tmpSigElts, unsigned row, Value *opcode, Value *sigID, Function *StoreOutput, Function *Entry); }; // Wrapper for StoreOutput and StorePatchConstant, which have the same // signature. // void (opcode, sigId, rowIndex, colIndex, value); class DxilOutputStore { public: const llvm::CallInst *Instr; // Construction and identification DxilOutputStore(llvm::CallInst *pInstr) : Instr(pInstr) {} // Validation support bool isAllowed() const { return true; } bool isArgumentListValid() const { if (5 != llvm::dyn_cast<llvm::CallInst>(Instr)->getNumArgOperands()) return false; return true; } // Accessors llvm::Value *get_outputSigId() const { return Instr->getOperand(DXIL::OperandIndex::kStoreOutputIDOpIdx); } llvm::Value *get_rowIndex() const { return Instr->getOperand(DXIL::OperandIndex::kStoreOutputRowOpIdx); } uint64_t get_colIndex() const { Value *col = Instr->getOperand(DXIL::OperandIndex::kStoreOutputColOpIdx); return cast<ConstantInt>(col)->getLimitedValue(); } llvm::Value *get_value() const { return Instr->getOperand(DXIL::OperandIndex::kStoreOutputValOpIdx); } }; bool DxilEliminateOutputDynamicIndexing::EliminateDynamicOutput( hlsl::OP *hlslOP, DXIL::OpCode opcode, DxilSignature &outputSig, Function *Entry) { auto &storeOutputs = hlslOP->GetOpFuncList(opcode); MapVector<Value *, Type *> dynamicSigSet; for (auto it : storeOutputs) { Function *F = it.second; // Skip overload not used. if (!F) continue; for (User *U : F->users()) { CallInst *CI = cast<CallInst>(U); DxilOutputStore store(CI); // Save dynamically indexed sigID. 
if (!isa<ConstantInt>(store.get_rowIndex())) { Value *sigID = store.get_outputSigId(); dynamicSigSet[sigID] = store.get_value()->getType(); } } } if (dynamicSigSet.empty()) return false; IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(Entry)); Value *opcodeV = AllocaBuilder.getInt32(static_cast<unsigned>(opcode)); Value *zero = AllocaBuilder.getInt32(0); for (auto sig : dynamicSigSet) { Value *sigID = sig.first; Type *EltTy = sig.second; unsigned ID = cast<ConstantInt>(sigID)->getLimitedValue(); DxilSignatureElement &sigElt = outputSig.GetElement(ID); unsigned row = sigElt.GetRows(); unsigned col = sigElt.GetCols(); Type *AT = ArrayType::get(EltTy, row); std::vector<Value *> tmpSigElts(col); for (unsigned c = 0; c < col; c++) { Value *newCol = AllocaBuilder.CreateAlloca(AT); tmpSigElts[c] = newCol; } Function *F = hlslOP->GetOpFunc(opcode, EltTy); // Change store output to store tmpSigElts. ReplaceDynamicOutput(tmpSigElts, sigID, zero, F); // Store tmpSigElts to Output before return. StoreTmpSigToOutput(tmpSigElts, row, opcodeV, sigID, F, Entry); } return true; } void DxilEliminateOutputDynamicIndexing::ReplaceDynamicOutput( ArrayRef<Value *> tmpSigElts, Value *sigID, Value *zero, Function *F) { for (auto it = F->user_begin(); it != F->user_end();) { CallInst *CI = cast<CallInst>(*(it++)); DxilOutputStore store(CI); if (sigID == store.get_outputSigId()) { uint64_t col = store.get_colIndex(); Value *tmpSigElt = tmpSigElts[col]; IRBuilder<> Builder(CI); Value *r = store.get_rowIndex(); // Store to tmpSigElt. Value *GEP = Builder.CreateInBoundsGEP(tmpSigElt, {zero, r}); Builder.CreateStore(store.get_value(), GEP); // Remove store output. CI->eraseFromParent(); } } } void DxilEliminateOutputDynamicIndexing::StoreTmpSigToOutput( ArrayRef<Value *> tmpSigElts, unsigned row, Value *opcode, Value *sigID, Function *StoreOutput, Function *Entry) { Value *args[] = {opcode, sigID, /*row*/ nullptr, /*col*/ nullptr, /*val*/ nullptr}; // Store the tmpSigElts to Output before every return. for (auto &BB : Entry->getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { IRBuilder<> Builder(RI); Value *zero = Builder.getInt32(0); for (unsigned c = 0; c < tmpSigElts.size(); c++) { Value *col = tmpSigElts[c]; args[DXIL::OperandIndex::kStoreOutputColOpIdx] = Builder.getInt8(c); for (unsigned r = 0; r < row; r++) { Value *GEP = Builder.CreateInBoundsGEP(col, {zero, Builder.getInt32(r)}); Value *V = Builder.CreateLoad(GEP); args[DXIL::OperandIndex::kStoreOutputRowOpIdx] = Builder.getInt32(r); args[DXIL::OperandIndex::kStoreOutputValOpIdx] = V; Builder.CreateCall(StoreOutput, args); } } } } } } // namespace char DxilEliminateOutputDynamicIndexing::ID = 0; ModulePass *llvm::createDxilEliminateOutputDynamicIndexingPass() { return new DxilEliminateOutputDynamicIndexing(); } INITIALIZE_PASS(DxilEliminateOutputDynamicIndexing, "hlsl-dxil-eliminate-output-dynamic", "DXIL eliminate output dynamic indexing", false, false)
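A plain-C++ analogy (hypothetical fixed sizes, not DXIL) of the rewrite this pass performs: stores with a dynamic row index go through a scratch array, and the scratch array is copied to the real output with constant row indices once, before returning.

// 'out' stands for the output signature rows; only constant-indexed writes
// to it are legal here, mirroring StoreOutput after the rewrite.
void writeOutputRows(float out[4], const float vals[4], const int rows[4]) {
  float tmp[4] = {};            // the per-signature alloca created by the pass
  for (int i = 0; i < 4; ++i)
    tmp[rows[i]] = vals[i];     // dynamic row index (assumed in [0,4)) is fine on the alloca
  for (int r = 0; r < 4; ++r)
    out[r] = tmp[r];            // constant row indices at the return point
}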
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilExportMap.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilExportMap.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // dxilutil::ExportMap for handling -exports option. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilExportMap.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/Support/Global.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSet.h" #include "llvm/IR/Function.h" #include "llvm/Support/raw_ostream.h" #include <set> #include <string> #include <vector> using namespace llvm; using namespace hlsl; namespace hlsl { namespace dxilutil { void ExportMap::clear() { m_ExportMap.clear(); } bool ExportMap::empty() const { return m_ExportMap.empty(); } bool ExportMap::ParseExports(const std::vector<std::string> &exportOpts, llvm::raw_ostream &errors) { for (auto &str : exportOpts) { llvm::StringRef exports = StoreString(str); size_t start = 0; size_t end = llvm::StringRef::npos; // def1;def2;... while (true) { end = exports.find_first_of(';', start); llvm::StringRef exportDef = exports.slice(start, end); // def: export1[[,export2,...]=internal] llvm::StringRef internalName = exportDef; size_t equals = exportDef.find_first_of('='); if (equals != llvm::StringRef::npos) { internalName = exportDef.substr(equals + 1); size_t exportStart = 0; while (true) { size_t comma = exportDef.find_first_of(',', exportStart); if (comma == llvm::StringRef::npos || comma > equals) break; if (exportStart < comma) Add(exportDef.slice(exportStart, comma), internalName); exportStart = comma + 1; } if (exportStart < equals) Add(exportDef.slice(exportStart, equals), internalName); } else { Add(internalName); } if (equals == 0 || internalName.empty()) { errors << "Invalid syntax for -exports: '" << exportDef << "'. 
Syntax is: export1[[,export2,...]=internal][;...]"; return false; } if (end == llvm::StringRef::npos) break; start = end + 1; } } return true; } void ExportMap::Add(llvm::StringRef exportName, llvm::StringRef internalName) { // Incoming strings may be escaped (because they originally come from // arguments) Unescape them here, if necessary if (exportName.startswith("\\")) { std::string str; llvm::raw_string_ostream os(str); PrintUnescapedString(exportName, os); exportName = StoreString(os.str()); } if (internalName.startswith("\\")) { std::string str; llvm::raw_string_ostream os(str); PrintUnescapedString(internalName, os); internalName = StoreString(os.str()); } if (internalName.empty()) internalName = exportName; exportName = DemangleFunctionName(exportName); m_ExportMap[internalName].insert(exportName); } ExportMap::const_iterator ExportMap::GetExportsByName(llvm::StringRef Name) const { ExportMap::const_iterator it = m_ExportMap.find(Name); StringRef unmangled = DemangleFunctionName(Name); if (it == end()) { if (Name.startswith(ManglingPrefix)) { it = m_ExportMap.find(unmangled); } else if (Name.startswith(EntryPrefix)) { it = m_ExportMap.find(Name.substr(strlen(EntryPrefix))); } } return it; } bool ExportMap::IsExported(llvm::StringRef original) const { if (m_ExportMap.empty()) return true; return GetExportsByName(original) != end(); } void ExportMap::BeginProcessing() { m_ExportNames.clear(); m_NameCollisions.clear(); m_UnusedExports.clear(); for (auto &it : m_ExportMap) { m_UnusedExports.emplace(it.getKey()); } } bool ExportMap::ProcessFunction(llvm::Function *F, bool collisionAvoidanceRenaming) { // Skip if already added. This can happen due to patch constant functions. if (m_RenameMap.find(F) != m_RenameMap.end()) return true; StringRef originalName = F->getName(); StringRef unmangled = DemangleFunctionName(originalName); auto it = GetExportsByName(F->getName()); // Early out if not exported, and do optional collision avoidance if (it == end()) { F->setLinkage(GlobalValue::LinkageTypes::InternalLinkage); if (collisionAvoidanceRenaming) { std::string internalName = (Twine("internal.") + unmangled).str(); internalName = dxilutil::ReplaceFunctionName(originalName, internalName); F->setName(internalName); } return false; } F->setLinkage(GlobalValue::LinkageTypes::ExternalLinkage); // Add entry to m_RenameMap: auto &renames = m_RenameMap[F]; const llvm::StringSet<> &exportRenames = it->getValue(); llvm::StringRef internalName = it->getKey(); // mark export used UseExport(internalName); // Add identity first auto itIdentity = exportRenames.find(unmangled); if (exportRenames.empty() || itIdentity != exportRenames.end()) { if (exportRenames.size() > 1) renames.insert(originalName); ExportName(originalName); } else if (collisionAvoidanceRenaming) { // do optional collision avoidance for exports being renamed std::string tempName = (Twine("temp.") + unmangled).str(); tempName = dxilutil::ReplaceFunctionName(originalName, tempName); F->setName(tempName); } for (auto itName = exportRenames.begin(); itName != exportRenames.end(); itName++) { // Now add actual renames if (itName != itIdentity) { StringRef newName = StoreString( dxilutil::ReplaceFunctionName(F->getName(), itName->getKey())); renames.insert(newName); ExportName(newName); } } return true; } void ExportMap::RegisterExportedFunction(llvm::Function *F) { // Skip if already added if (m_RenameMap.find(F) != m_RenameMap.end()) return; F->setLinkage(GlobalValue::LinkageTypes::ExternalLinkage); NameSet &renames = m_RenameMap[F]; 
(void)(renames); // Don't actually add anything ExportName(F->getName()); } void ExportMap::UseExport(llvm::StringRef internalName) { auto it = m_UnusedExports.find(internalName); if (it != m_UnusedExports.end()) m_UnusedExports.erase(it); } void ExportMap::ExportName(llvm::StringRef exportName) { auto result = m_ExportNames.insert(exportName); if (!result.second) { // Already present, report collision m_NameCollisions.insert(exportName); } } bool ExportMap::EndProcessing() const { return m_UnusedExports.empty() && m_NameCollisions.empty(); } llvm::StringRef ExportMap::StoreString(llvm::StringRef str) { return *m_StringStorage.insert(str).first; } } // namespace dxilutil } // namespace hlsl
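// For reference, ParseExports above accepts definitions of the form
// export1[[,export2,...]=internal][;...]. A self-contained sketch of that
// grammar using std::string in place of llvm::StringRef; the option string
// and names below are invented for illustration.
#include <cstdio>
#include <sstream>
#include <string>

int main() {
  std::string opt = "renamedMain=main;helperA,helperB=helper;standalone";
  std::stringstream defs(opt);
  std::string def;
  while (std::getline(defs, def, ';')) { // def1;def2;...
    size_t eq = def.find('=');
    std::string internalName =
        (eq == std::string::npos) ? def : def.substr(eq + 1);
    std::string exports =
        (eq == std::string::npos) ? def : def.substr(0, eq);
    std::stringstream names(exports); // export1,export2,...
    std::string exp;
    while (std::getline(names, exp, ','))
      std::printf("export '%s' -> internal '%s'\n", exp.c_str(),
                  internalName.c_str());
  }
}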
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLResource.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLResource.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLResource.h" #include "dxc/Support/Global.h" namespace hlsl { //------------------------------------------------------------------------------ // // HLResource methods. // HLResource::HLResource() : DxilResource() {} } // namespace hlsl
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPatchShaderRecordBindingsShared.h
#pragma once #include <stdint.h> #define FallbackLayerRegisterSpace 214743647 // SRVs #define FallbackLayerHitGroupRecordByteAddressBufferRegister 0 #define FallbackLayerMissShaderRecordByteAddressBufferRegister 1 #define FallbackLayerRayGenShaderRecordByteAddressBufferRegister 2 #define FallbackLayerCallableShaderRecordByteAddressBufferRegister 3 // SRV & UAV #define FallbackLayerDescriptorHeapTable 0 // There's a driver issue on some hardware with // starting a bindless table on any register but 0, so // make sure each bindless table has its own register space #define FallbackLayerDescriptorHeapSpaceOffset 1 #define FallbackLayerNumDescriptorHeapSpacesPerView 10 // CBVs #define FallbackLayerDispatchConstantsRegister 0 #define FallbackLayerAccelerationStructureList 1 #ifndef HLSL struct ViewKey { unsigned int ViewType; union { unsigned int StructuredStride; // When ViewType == StructuredBuffer unsigned int SRVComponentType; // When ViewType != StructuredBuffer && // ViewType != RawBuffer }; }; struct ShaderInfo { const wchar_t *ExportName; unsigned int SamplerDescriptorSizeInBytes; unsigned int SrvCbvUavDescriptorSizeInBytes; unsigned int ShaderRecordIdentifierSizeInBytes; const void *pRootSignatureDesc; ViewKey *pSRVRegisterSpaceArray; unsigned int *pNumSRVSpaces; ViewKey *pUAVRegisterSpaceArray; unsigned int *pNumUAVSpaces; }; struct DispatchRaysConstants { uint32_t RayDispatchDimensionsWidth; uint32_t RayDispatchDimensionsHeight; uint32_t HitGroupShaderRecordStride; uint32_t MissShaderRecordStride; // 64-bit values uint64_t SamplerDescriptorHeapStart; uint64_t SrvCbvUavDescriptorHeapStart; }; enum DescriptorRangeTypes { SRV = 0, CBV, UAV, Sampler, NumRangeTypes }; enum RootSignatureParameterOffset { HitGroupRecord = 0, MissShaderRecord, RayGenShaderRecord, CallableShaderRecord, DispatchConstants, CbvSrvUavDescriptorHeapAliasedTables, SamplerDescriptorHeapAliasedTables, AccelerationStructuresList, #if ENABLE_UAV_LOG DebugUAVLog, #endif NumParameters }; #endif
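// A sketch of how the descriptor-heap constants above appear intended to
// combine: each aliased bindless table claims its own register space,
// starting at FallbackLayerDescriptorHeapSpaceOffset. This is an assumption
// for illustration, not part of the fallback layer itself.
#include <cstdio>

int main() {
  const unsigned spaceOffset = 1; // FallbackLayerDescriptorHeapSpaceOffset
  const unsigned numSpaces = 10;  // FallbackLayerNumDescriptorHeapSpacesPerView
  for (unsigned i = 0; i < numSpaces; ++i)
    std::printf("aliased table %u -> register space %u\n", i, spaceOffset + i);
}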
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLDeadFunctionElimination.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLDeadFunctionElimination.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "llvm/IR/Function.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" using namespace llvm; using namespace hlsl; namespace { class HLDeadFunctionElimination : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit HLDeadFunctionElimination() : ModulePass(ID) {} StringRef getPassName() const override { return "Remove all unused functions except entry from HLModule"; } bool runOnModule(Module &M) override { if (M.HasHLModule()) { HLModule &HLM = M.GetHLModule(); bool IsLib = HLM.GetShaderModel()->IsLib(); // Remove unused functions except entry and patch constant func. // For library profile, only remove unused external functions. Function *EntryFunc = HLM.GetEntryFunction(); Function *PatchConstantFunc = HLM.GetPatchConstantFunction(); bool bChanged = false; while (dxilutil::RemoveUnusedFunctions(M, EntryFunc, PatchConstantFunc, IsLib)) bChanged = true; return bChanged; } return false; } }; } // namespace char HLDeadFunctionElimination::ID = 0; ModulePass *llvm::createHLDeadFunctionEliminationPass() { return new HLDeadFunctionElimination(); } INITIALIZE_PASS(HLDeadFunctionElimination, "hl-dfe", "Remove all unused functions except entry from HLModule", false, false)
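// A toy standalone illustration of the fixed-point sweep in runOnModule
// above: deleting one unused function can strip the last reference to
// another, so removal repeats until a pass changes nothing. The function
// table and names here are invented for the example.
#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  // name -> callees; "entry" is the root that must be kept.
  std::map<std::string, std::vector<std::string>> fns = {
      {"entry", {"used"}}, {"used", {}}, {"deadA", {"deadB"}}, {"deadB", {}}};
  bool changed = true;
  while (changed) { // same shape as the while loop in runOnModule
    changed = false;
    for (auto it = fns.begin(); it != fns.end();) {
      bool referenced = (it->first == "entry");
      for (auto &f : fns)
        referenced |= std::count(f.second.begin(), f.second.end(),
                                 it->first) > 0;
      if (!referenced) {
        it = fns.erase(it); // unused: remove and sweep again
        changed = true;
      } else {
        ++it;
      }
    }
  }
  for (auto &f : fns)
    std::printf("kept: %s\n", f.first.c_str());
}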
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilTranslateRawBuffer.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilTranslateRawBuffer.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/Support/Global.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include <vector> using namespace llvm; using namespace hlsl; // Translate RawBufferLoad/RawBufferStore // This pass is to make sure that we generate correct buffer load for DXIL // For DXIL < 1.2, rawBufferLoad will be translated to BufferLoad instruction // without mask. namespace { class DxilTranslateRawBuffer : public ModulePass { public: static char ID; explicit DxilTranslateRawBuffer() : ModulePass(ID) {} bool runOnModule(Module &M) { unsigned major, minor; DxilModule &DM = M.GetDxilModule(); DM.GetDxilVersion(major, minor); OP *hlslOP = DM.GetOP(); // Split 64bit for shader model less than 6.3. if (major == 1 && minor <= 2) { for (auto F = M.functions().begin(); F != M.functions().end();) { Function *func = &*(F++); DXIL::OpCodeClass opClass; if (hlslOP->GetOpCodeClass(func, opClass)) { if (opClass == DXIL::OpCodeClass::RawBufferLoad) { Type *ETy = OP::GetOverloadType(DXIL::OpCode::RawBufferLoad, func); bool is64 = ETy->isDoubleTy() || ETy == Type::getInt64Ty(ETy->getContext()); if (is64) { ReplaceRawBufferLoad64Bit(func, ETy, M); func->eraseFromParent(); } } else if (opClass == DXIL::OpCodeClass::RawBufferStore) { Type *ETy = OP::GetOverloadType(DXIL::OpCode::RawBufferStore, func); bool is64 = ETy->isDoubleTy() || ETy == Type::getInt64Ty(ETy->getContext()); if (is64) { ReplaceRawBufferStore64Bit(func, ETy, M); func->eraseFromParent(); } } } } } if (major == 1 && minor < 2) { for (auto F = M.functions().begin(), E = M.functions().end(); F != E;) { Function *func = &*(F++); if (func->hasName()) { if (func->getName().startswith("dx.op.rawBufferLoad")) { ReplaceRawBufferLoad(func, M); func->eraseFromParent(); } else if (func->getName().startswith("dx.op.rawBufferStore")) { ReplaceRawBufferStore(func, M); func->eraseFromParent(); } } } } return true; } private: // Replace RawBufferLoad/Store to BufferLoad/Store for DXIL < 1.2 void ReplaceRawBufferLoad(Function *F, Module &M); void ReplaceRawBufferStore(Function *F, Module &M); void ReplaceRawBufferLoad64Bit(Function *F, Type *EltTy, Module &M); void ReplaceRawBufferStore64Bit(Function *F, Type *EltTy, Module &M); }; } // namespace void DxilTranslateRawBuffer::ReplaceRawBufferLoad(Function *F, Module &M) { dxilutil::ReplaceRawBufferLoadWithBufferLoad(F, M.GetDxilModule().GetOP()); } void DxilTranslateRawBuffer::ReplaceRawBufferLoad64Bit(Function *F, Type *EltTy, Module &M) { dxilutil::ReplaceRawBufferLoad64Bit(F, EltTy, M.GetDxilModule().GetOP()); } void DxilTranslateRawBuffer::ReplaceRawBufferStore(Function *F, Module &M) { dxilutil::ReplaceRawBufferStoreWithBufferStore(F, M.GetDxilModule().GetOP()); } void 
DxilTranslateRawBuffer::ReplaceRawBufferStore64Bit(Function *F, Type *ETy, Module &M) { dxilutil::ReplaceRawBufferStore64Bit(F, ETy, M.GetDxilModule().GetOP()); } char DxilTranslateRawBuffer::ID = 0; ModulePass *llvm::createDxilTranslateRawBuffer() { return new DxilTranslateRawBuffer(); } INITIALIZE_PASS(DxilTranslateRawBuffer, "hlsl-translate-dxil-raw-buffer", "Translate raw buffer load", false, false)
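// A standalone sketch of what the 64-bit split above does conceptually: on
// targets without 64-bit raw buffer operations, one 64-bit load becomes two
// 32-bit loads whose halves are recombined. Load32/Load64 are illustrative
// stand-ins, not the actual DXIL lowering.
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for a 32-bit raw buffer load at a byte offset.
uint32_t Load32(const std::vector<uint32_t> &buf, size_t byteOffset) {
  return buf[byteOffset / 4];
}

// One 64-bit load emulated as two 32-bit loads, low half first.
uint64_t Load64(const std::vector<uint32_t> &buf, size_t byteOffset) {
  uint64_t lo = Load32(buf, byteOffset);
  uint64_t hi = Load32(buf, byteOffset + 4);
  return lo | (hi << 32);
}

int main() {
  std::vector<uint32_t> buf = {0xDDCCBBAAu, 0x11223344u};
  std::printf("0x%016llx\n", (unsigned long long)Load64(buf, 0)); // 0x11223344ddccbbaa
}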
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPromoteResourcePasses.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilPromoteResourcePasses.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilCBuffer.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilResource.h" #include "dxc/DXIL/DxilResourceBase.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "llvm/ADT/DenseSet.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; // Legalize resource use. // Map local or static global resource to global resource. // Require inline for static global resource. namespace { static const StringRef kStaticResourceLibErrorMsg = "non const static global resource use is disallowed in library exports."; class DxilPromoteStaticResources : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilPromoteStaticResources() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Legalize Static Resource Use"; } bool runOnModule(Module &M) override { // Promote static global variables. return PromoteStaticGlobalResources(M); } private: bool PromoteStaticGlobalResources(Module &M); }; char DxilPromoteStaticResources::ID = 0; class DxilPromoteLocalResources : public FunctionPass { void getAnalysisUsage(AnalysisUsage &AU) const override; public: static char ID; // Pass identification, replacement for typeid explicit DxilPromoteLocalResources() : FunctionPass(ID) {} StringRef getPassName() const override { return "DXIL Legalize Resource Use"; } bool runOnFunction(Function &F) override { // Promote local resource first. return PromoteLocalResource(F); } private: bool PromoteLocalResource(Function &F); }; char DxilPromoteLocalResources::ID = 0; } // namespace void DxilPromoteLocalResources::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.setPreservesAll(); } bool DxilPromoteLocalResources::PromoteLocalResource(Function &F) { bool bModified = false; std::vector<AllocaInst *> Allocas; DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); AssumptionCache &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); BasicBlock &BB = F.getEntryBlock(); unsigned allocaSize = 0; while (1) { Allocas.clear(); // Find allocas that are safe to promote, by looking at all instructions in // the entry node for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { // Is it an alloca? if (dxilutil::IsHLSLObjectType( dxilutil::GetArrayEltTy(AI->getAllocatedType()))) { if (isAllocaPromotable(AI)) Allocas.push_back(AI); } } if (Allocas.empty()) break; // No update. 
// Report error and break. if (allocaSize == Allocas.size()) { // TODO: Add test for this instance of the error: "local resource not // guaranteed to map to unique global resource." No test currently // exists. dxilutil::EmitErrorOnContext(F.getContext(), dxilutil::kResourceMapErrorMsg); break; } allocaSize = Allocas.size(); PromoteMemToReg(Allocas, *DT, nullptr, &AC); bModified = true; } return bModified; } FunctionPass *llvm::createDxilPromoteLocalResources() { return new DxilPromoteLocalResources(); } INITIALIZE_PASS_BEGIN(DxilPromoteLocalResources, "hlsl-dxil-promote-local-resources", "DXIL promote local resource use", false, true) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(DxilPromoteLocalResources, "hlsl-dxil-promote-local-resources", "DXIL promote local resource use", false, true) bool DxilPromoteStaticResources::PromoteStaticGlobalResources(Module &M) { if (M.GetOrCreateHLModule().GetShaderModel()->IsLib()) { // Read/write to global static resource is disallowed for libraries: // Resource use needs to be resolved to a single real global resource, // but it may not be possible since any external function call may re-enter // at any other library export, which could modify the global static // between write and read. // While it could work for certain cases, describing the boundary at // the HLSL level is difficult, so at this point it's better to disallow. // example of what could work: // After inlining, exported functions must have writes to static globals // before reads, and must not have any external function calls between // writes and subsequent reads, such that the static global may be // optimized away for the exported function. for (auto &GV : M.globals()) { if (GV.getLinkage() == GlobalVariable::LinkageTypes::InternalLinkage && !GV.isConstant() && dxilutil::IsHLSLObjectType(dxilutil::GetArrayEltTy(GV.getType()))) { if (!GV.user_empty()) { if (Instruction *I = dyn_cast<Instruction>(*GV.user_begin())) { dxilutil::EmitErrorOnInstruction(I, kStaticResourceLibErrorMsg); break; } } } } return false; } bool bModified = false; std::set<GlobalVariable *> staticResources; for (auto &GV : M.globals()) { if (GV.getLinkage() == GlobalVariable::LinkageTypes::InternalLinkage && dxilutil::IsHLSLObjectType(dxilutil::GetArrayEltTy(GV.getType()))) { staticResources.insert(&GV); } } SSAUpdater SSA; SmallVector<Instruction *, 4> Insts; // Make sure every resource load has mapped to global variable. while (!staticResources.empty()) { bool bUpdated = false; for (auto it = staticResources.begin(); it != staticResources.end();) { GlobalVariable *GV = *(it++); // Build list of instructions to promote. for (User *U : GV->users()) { if (isa<LoadInst>(U) || isa<StoreInst>(U)) { Insts.emplace_back(cast<Instruction>(U)); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { for (User *gepU : GEP->users()) { DXASSERT_NOMSG(isa<LoadInst>(gepU) || isa<StoreInst>(gepU)); if (isa<LoadInst>(gepU) || isa<StoreInst>(gepU)) Insts.emplace_back(cast<Instruction>(gepU)); } } else { DXASSERT(false, "Unhandled user of resource static global"); } } LoadAndStorePromoter(Insts, SSA).run(Insts); GV->removeDeadConstantUsers(); if (GV->user_empty()) { bUpdated = true; staticResources.erase(GV); } Insts.clear(); } if (!bUpdated) { // TODO: Add test for this instance of the error: "local resource not // guaranteed to map to unique global resource." No test currently // exists. 
dxilutil::EmitErrorOnContext(M.getContext(), dxilutil::kResourceMapErrorMsg); break; } bModified = true; } return bModified; } ModulePass *llvm::createDxilPromoteStaticResources() { return new DxilPromoteStaticResources(); } INITIALIZE_PASS(DxilPromoteStaticResources, "hlsl-dxil-promote-static-resources", "DXIL promote static resource use", false, false) // Mutate high-level resource type into handle. // This is used for SM 6.6+, on libraries only, where // CreateHandleForLib is eliminated, and high-level resource // types are only preserved in metadata for reflection purposes. namespace { // Overview // 1. collectCandidates - collect to MutateValSet // Start from resource global variable, function parameter/ret, alloca. // Propagate to all insts, GEP/ld/st/phi/select/called functions. // 2. mutateCandidates // Mutate all non-function value types. // Mutate functions by creating new function with new type, then // splice original function blocks into new function, and // replace old argument uses with new function's arguments. class DxilMutateResourceToHandle : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilMutateResourceToHandle() : ModulePass(ID) {} StringRef getPassName() const override { return "DXIL Mutate resource to handle"; } bool runOnModule(Module &M) override { if (M.HasHLModule()) { auto &HLM = M.GetHLModule(); if (!HLM.GetShaderModel()->IsSM66Plus()) return false; hlslOP = HLM.GetOP(); pTypeSys = &HLM.GetTypeSystem(); } else if (M.HasDxilModule()) { auto &DM = M.GetDxilModule(); if (!DM.GetShaderModel()->IsSM66Plus()) return false; hlslOP = DM.GetOP(); pTypeSys = &DM.GetTypeSystem(); } else { return false; } hdlTy = hlslOP->GetHandleType(); if (hlslOP->IsDxilOpUsed(DXIL::OpCode::CreateHandleForLib)) { createHandleForLibOnHandle = hlslOP->GetOpFunc(DXIL::OpCode::CreateHandleForLib, hdlTy); } collectCandidates(M); mutateCandidates(M); // Remove cast to handle. 
return !MutateValSet.empty(); } private: Type *mutateToHandleTy(Type *Ty, bool bResType = false); bool mutateTypesToHandleTy(SmallVector<Type *, 4> &Tys); void collectGlobalResource(DxilResourceBase *Res, SmallVector<Value *, 8> &WorkList); void collectAlloca(Function &F, SmallVector<Value *, 8> &WorkList); SmallVector<Value *, 8> collectHlslObjects(Module &M); void collectCandidates(Module &M); void mutateCandidates(Module &M); Type *hdlTy = nullptr; hlsl::OP *hlslOP = nullptr; Function *createHandleForLibOnHandle = nullptr; DxilTypeSystem *pTypeSys; DenseSet<Value *> MutateValSet; DenseMap<Type *, Type *> MutateTypeMap; }; char DxilMutateResourceToHandle::ID = 0; Type *DxilMutateResourceToHandle::mutateToHandleTy(Type *Ty, bool bResType) { auto it = MutateTypeMap.find(Ty); if (it != MutateTypeMap.end()) return it->second; Type *ResultTy = nullptr; if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { SmallVector<unsigned, 2> nestedSize; Type *EltTy = Ty; while (ArrayType *NestAT = dyn_cast<ArrayType>(EltTy)) { nestedSize.emplace_back(NestAT->getNumElements()); EltTy = NestAT->getElementType(); } Type *mutatedTy = mutateToHandleTy(EltTy, bResType); if (mutatedTy == EltTy) { ResultTy = Ty; } else { Type *newAT = mutatedTy; for (auto it = nestedSize.rbegin(), E = nestedSize.rend(); it != E; ++it) newAT = ArrayType::get(newAT, *it); ResultTy = newAT; } } else if (PointerType *PT = dyn_cast<PointerType>(Ty)) { Type *EltTy = PT->getElementType(); Type *mutatedTy = mutateToHandleTy(EltTy, bResType); if (mutatedTy == EltTy) ResultTy = Ty; else ResultTy = mutatedTy->getPointerTo(PT->getAddressSpace()); } else if (dxilutil::IsHLSLResourceType(Ty)) { ResultTy = hdlTy; } else if (StructType *ST = dyn_cast<StructType>(Ty)) { if (bResType) { // For top-level resource GV type, the first struct type is the resource // type to be changed into handle. ResultTy = hdlTy; } else if (!ST->isOpaque()) { SmallVector<Type *, 4> Elts(ST->element_begin(), ST->element_end()); if (!mutateTypesToHandleTy(Elts)) { ResultTy = Ty; } else { ResultTy = StructType::create(Elts, ST->getName().str() + ".hdl"); } } else { // FIXME: Opaque type "ConstantBuffer" is only used for an empty cbuffer. // We should never use an empty cbuffer, and we should try to get rid of // the need for this type in the first place, like using nullptr for the // DxilResourceBase::m_pSymbol, since this resource should get deleted // before final dxil in all cases. if (ST->getName() == "ConstantBuffer") ResultTy = hdlTy; else ResultTy = Ty; } } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) { Type *RetTy = FT->getReturnType(); SmallVector<Type *, 4> Args(FT->param_begin(), FT->param_end()); Type *mutatedRetTy = mutateToHandleTy(RetTy); if (!mutateTypesToHandleTy(Args) && RetTy == mutatedRetTy) { ResultTy = Ty; } else { ResultTy = FunctionType::get(mutatedRetTy, Args, FT->isVarArg()); } } else { ResultTy = Ty; } MutateTypeMap[Ty] = ResultTy; return ResultTy; } bool DxilMutateResourceToHandle::mutateTypesToHandleTy( SmallVector<Type *, 4> &Tys) { bool bMutated = false; for (size_t i = 0; i < Tys.size(); i++) { Type *Ty = Tys[i]; Type *mutatedTy = mutateToHandleTy(Ty); if (Ty != mutatedTy) { Tys[i] = mutatedTy; bMutated = true; } } return bMutated; } void DxilMutateResourceToHandle::collectGlobalResource( DxilResourceBase *Res, SmallVector<Value *, 8> &WorkList) { Value *GV = Res->GetGlobalSymbol(); // If already handle, don't overwrite HLSL type. // It's still possible that load users have a wrong type (invalid IR) due to // linking mixed targets. 
But in that case, we need to start at the // non-handle overloads of CreateHandleForLib and mutate/rewrite from there. // That's because we may have an already translated GV, but some load and // CreateHandleForLib calls use the wrong type from linked code. Type *MTy = mutateToHandleTy(GV->getType(), /*bResType*/ true); if (GV->getType() != MTy) { // Save the hlsl type before mutating to a handle. Res->SetHLSLType(GV->getType()); WorkList.emplace_back(GV); } } void DxilMutateResourceToHandle::collectAlloca( Function &F, SmallVector<Value *, 8> &WorkList) { if (F.isDeclaration()) return; for (Instruction &I : F.getEntryBlock()) { AllocaInst *AI = dyn_cast<AllocaInst>(&I); if (!AI) continue; Type *Ty = AI->getType(); Type *MTy = mutateToHandleTy(Ty); if (Ty == MTy) continue; WorkList.emplace_back(AI); } } } // namespace SmallVector<Value *, 8> DxilMutateResourceToHandle::collectHlslObjects(Module &M) { // Add every global/function/argument/alloca that has a resource type. SmallVector<Value *, 8> WorkList; // Global resources. if (M.HasHLModule()) { auto &HLM = M.GetHLModule(); for (auto &Res : HLM.GetCBuffers()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : HLM.GetSRVs()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : HLM.GetUAVs()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : HLM.GetSamplers()) { collectGlobalResource(Res.get(), WorkList); } } else { auto &DM = M.GetDxilModule(); for (auto &Res : DM.GetCBuffers()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : DM.GetSRVs()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : DM.GetUAVs()) { collectGlobalResource(Res.get(), WorkList); } for (auto &Res : DM.GetSamplers()) { collectGlobalResource(Res.get(), WorkList); } } // Assume this is after SROA so no struct for global/alloca. // Functions. for (Function &F : M) { if (hlslOP && hlslOP->IsDxilOpFunc(&F)) { DXIL::OpCodeClass OpcodeClass; if (hlslOP->GetOpCodeClass(&F, OpcodeClass)) { if (OpcodeClass == DXIL::OpCodeClass::CreateHandleForLib && &F != createHandleForLibOnHandle) { WorkList.emplace_back(&F); MutateTypeMap[F.getFunctionType()->getFunctionParamType(1)] = hdlTy; continue; } } } collectAlloca(F, WorkList); FunctionType *FT = F.getFunctionType(); FunctionType *MFT = cast<FunctionType>(mutateToHandleTy(FT)); if (FT == MFT) continue; WorkList.emplace_back(&F); // Check args. for (Argument &Arg : F.args()) { Type *Ty = Arg.getType(); Type *MTy = mutateToHandleTy(Ty); if (Ty == MTy) continue; WorkList.emplace_back(&Arg); } } // Static globals. for (GlobalVariable &GV : M.globals()) { if (!dxilutil::IsStaticGlobal(&GV)) continue; Type *Ty = dxilutil::GetArrayEltTy(GV.getValueType()); if (!dxilutil::IsHLSLObjectType(Ty)) continue; WorkList.emplace_back(&GV); } return WorkList; } void DxilMutateResourceToHandle::collectCandidates(Module &M) { SmallVector<Value *, 8> WorkList = collectHlslObjects(M); // Propagate candidates. while (!WorkList.empty()) { Value *V = WorkList.pop_back_val(); MutateValSet.insert(V); for (User *U : V->users()) { // collect in a user. SmallVector<Value *, 2> newCandidates; // Should only be used by ld/st/sel/phi/gep/call. 
if (LoadInst *LI = dyn_cast<LoadInst>(U)) { newCandidates.emplace_back(LI); } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { Value *Ptr = SI->getPointerOperand(); Value *Val = SI->getValueOperand(); if (V == Ptr) newCandidates.emplace_back(Val); else newCandidates.emplace_back(Ptr); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { // If the result type of the GEP is not related to a resource type, skip. Type *Ty = GEP->getType(); Type *MTy = mutateToHandleTy(Ty); if (MTy == Ty) { // Don't recurse, but still need to mutate GEP. MutateValSet.insert(GEP); continue; } newCandidates.emplace_back(GEP); } else if (PHINode *Phi = dyn_cast<PHINode>(U)) { // Propagate all operands. newCandidates.emplace_back(Phi); for (Use &PhiOp : Phi->incoming_values()) { if (V == PhiOp) continue; newCandidates.emplace_back(PhiOp); } } else if (SelectInst *Sel = dyn_cast<SelectInst>(U)) { // Propagate other result. newCandidates.emplace_back(Sel); Value *TrueV = Sel->getTrueValue(); Value *FalseV = Sel->getFalseValue(); if (TrueV == V) newCandidates.emplace_back(FalseV); else newCandidates.emplace_back(TrueV); } else if (BitCastOperator *BCO = dyn_cast<BitCastOperator>(U)) { // Make sure it is only used by lifetime intrinsics. for (User *BCUser : BCO->users()) { if (ConstantArray *CA = dyn_cast<ConstantArray>(BCUser)) { // For llvm.used. if (CA->hasOneUse()) { Value *CAUser = CA->user_back(); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CAUser)) { if (GV->getName() == "llvm.used") continue; } } else if (CA->user_empty()) { continue; } } CallInst *CI = cast<CallInst>(BCUser); Function *F = CI->getCalledFunction(); Intrinsic::ID ID = F->getIntrinsicID(); if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end) { DXASSERT(false, "unexpected resource object user"); } } } else { CallInst *CI = cast<CallInst>(U); Type *Ty = CI->getType(); Type *MTy = mutateToHandleTy(Ty); if (Ty != MTy) newCandidates.emplace_back(CI); SmallVector<Value *, 4> Args(CI->arg_operands().begin(), CI->arg_operands().end()); for (Value *Arg : Args) { if (Arg == V) continue; Type *Ty = Arg->getType(); Type *MTy = mutateToHandleTy(Ty); if (Ty == MTy) continue; newCandidates.emplace_back(Arg); } } for (Value *Val : newCandidates) { // New candidate found. if (MutateValSet.insert(Val).second) { WorkList.emplace_back(Val); } } } } } void DxilMutateResourceToHandle::mutateCandidates(Module &M) { SmallVector<Function *, 2> CandidateFns; for (Value *V : MutateValSet) { if (Function *F = dyn_cast<Function>(V)) { CandidateFns.emplace_back(F); continue; } Type *Ty = V->getType(); Type *MTy = mutateToHandleTy(Ty); if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { AI->setAllocatedType(MTy->getPointerElementType()); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { Type *MResultEltTy = mutateToHandleTy(GEP->getResultElementType()); GEP->setResultElementType(MResultEltTy); Type *MSrcEltTy = mutateToHandleTy(GEP->getSourceElementType()); GEP->setSourceElementType(MSrcEltTy); } else if (GEPOperator *GEPO = dyn_cast<GEPOperator>(V)) { // GEPOperator does not support setSourceElementType. // Create a new GEP here. Constant *C = cast<Constant>(GEPO->getPointerOperand()); IRBuilder<> B(C->getContext()); // Make sure C is mutated so the GEP gets the correct sourceElementType. C->mutateType(mutateToHandleTy(C->getType())); // Collect users of the GEP, then replace all uses with undef. 
SmallVector<Use *, 2> Uses; for (Use &U : GEPO->uses()) { Uses.emplace_back(&U); } SmallVector<Value *, 2> idxList(GEPO->idx_begin(), GEPO->idx_end()); Type *Ty = GEPO->getType(); GEPO->replaceAllUsesWith(UndefValue::get(Ty)); StringRef Name = GEPO->getName(); // GEPO and newGO will be the same constant except for a different // sourceElementType. The ConstantMap thinks they're the same constant, so // GEPO has to be removed first before creating newGO. C->removeDeadConstantUsers(); Value *newGO = B.CreateGEP(C, idxList, Name); // Update the uses. for (Use *U : Uses) { U->set(newGO); } continue; } V->mutateType(MTy); } // Mutate functions. for (Function *F : CandidateFns) { Function *MF = nullptr; if (hlslOP) { if (hlslOP->IsDxilOpFunc(F)) { DXIL::OpCodeClass OpcodeClass; if (hlslOP->GetOpCodeClass(F, OpcodeClass)) { if (OpcodeClass == DXIL::OpCodeClass::CreateHandleForLib) { MF = createHandleForLibOnHandle; } } } } if (hlsl::GetHLOpcodeGroup(F) == HLOpcodeGroup::HLCast) { // Eliminate pass-through cast for (auto it = F->user_begin(); it != F->user_end();) { CallInst *CI = cast<CallInst>(*(it++)); CI->replaceAllUsesWith(CI->getArgOperand(1)); CI->eraseFromParent(); } continue; } if (!MF) { FunctionType *FT = F->getFunctionType(); FunctionType *MFT = cast<FunctionType>(MutateTypeMap[FT]); MF = Function::Create(MFT, F->getLinkage(), "", &M); MF->takeName(F); // Copy calling conv. MF->setCallingConv(F->getCallingConv()); // Copy attributes. AttributeSet AS = F->getAttributes(); MF->setAttributes(AS); // Annotation. if (DxilFunctionAnnotation *FnAnnot = pTypeSys->GetFunctionAnnotation(F)) { DxilFunctionAnnotation *newFnAnnot = pTypeSys->AddFunctionAnnotation(MF); DxilParameterAnnotation &RetAnnot = newFnAnnot->GetRetTypeAnnotation(); RetAnnot = FnAnnot->GetRetTypeAnnotation(); for (unsigned i = 0; i < FnAnnot->GetNumParameters(); i++) { newFnAnnot->GetParameterAnnotation(i) = FnAnnot->GetParameterAnnotation(i); } } // Update function debug info. if (DISubprogram *funcDI = getDISubprogram(F)) funcDI->replaceFunction(MF); } for (auto it = F->user_begin(); it != F->user_end();) { CallInst *CI = cast<CallInst>(*(it++)); CI->setCalledFunction(MF); } if (F->isDeclaration()) { F->eraseFromParent(); continue; } // Take body of F. // Splice the body of the old function right into the new function. MF->getBasicBlockList().splice(MF->begin(), F->getBasicBlockList()); // Replace uses of the arguments. auto argIt = F->arg_begin(); for (auto MArgIt = MF->arg_begin(); MArgIt != MF->arg_end();) { Argument *Arg = (argIt++); Argument *MArg = (MArgIt++); Arg->replaceAllUsesWith(MArg); } } } ModulePass *llvm::createDxilMutateResourceToHandlePass() { return new DxilMutateResourceToHandle(); } INITIALIZE_PASS(DxilMutateResourceToHandle, "hlsl-dxil-resources-to-handle", "Mutate resource to handle", false, false)
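// A toy sketch of the no-progress guard both promotion passes above rely on:
// if an iteration leaves the candidate count unchanged, emit the resource-map
// error instead of looping forever. The promotion step is simulated here.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> allocas = {3, 2, 1}; // stand-ins for promotable resource allocas
  size_t lastSize = 0;
  while (!allocas.empty()) {
    if (lastSize == allocas.size()) { // no update since the last round: bail
      std::fprintf(stderr, "error: local resource not guaranteed to map to "
                           "unique global resource\n");
      break;
    }
    lastSize = allocas.size();
    allocas.pop_back(); // stands in for PromoteMemToReg eliminating one alloca
  }
  std::printf("remaining: %zu\n", allocas.size());
}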
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilLinker.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilLinker.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilLinker.h" #include "dxc/DXIL/DxilCBuffer.h" #include "dxc/DXIL/DxilEntryProps.h" #include "dxc/DXIL/DxilFunctionProps.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilResource.h" #include "dxc/DXIL/DxilSampler.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/Support/Global.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/IR/Constants.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Cloning.h" #include <memory> #include <vector> #include "dxc/DxilContainer/DxilContainer.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Scalar.h" #include "dxc/HLSL/ComputeViewIdState.h" #include "dxc/HLSL/DxilExportMap.h" using namespace llvm; using namespace hlsl; namespace { void CollectUsedFunctions(Constant *C, llvm::SetVector<Function *> &funcSet) { for (User *U : C->users()) { if (Instruction *I = dyn_cast<Instruction>(U)) { funcSet.insert(I->getParent()->getParent()); } else { Constant *CU = cast<Constant>(U); CollectUsedFunctions(CU, funcSet); } } } template <class T> void AddResourceMap( const std::vector<std::unique_ptr<T>> &resTab, DXIL::ResourceClass resClass, llvm::MapVector<const llvm::Constant *, DxilResourceBase *> &resMap, DxilModule &DM) { for (auto &Res : resTab) { resMap[Res->GetGlobalSymbol()] = Res.get(); } } void CloneFunction(Function *F, Function *NewF, ValueToValueMapTy &vmap, hlsl::DxilTypeSystem *TypeSys = nullptr, hlsl::DxilTypeSystem *SrcTypeSys = nullptr) { SmallVector<ReturnInst *, 2> Returns; // Map params. auto paramIt = NewF->arg_begin(); for (Argument &param : F->args()) { vmap[&param] = (paramIt++); } llvm::CloneFunctionInto(NewF, F, vmap, /*ModuleLevelChanges*/ true, Returns); if (TypeSys) { if (SrcTypeSys == nullptr) SrcTypeSys = TypeSys; TypeSys->CopyFunctionAnnotation(NewF, F, *SrcTypeSys); } // Remove params from vmap. for (Argument &param : F->args()) { vmap.erase(&param); } } } // namespace namespace { struct DxilFunctionLinkInfo { DxilFunctionLinkInfo(llvm::Function *F); llvm::Function *func; // SetVectors for deterministic iteration llvm::SetVector<llvm::Function *> usedFunctions; llvm::SetVector<llvm::GlobalVariable *> usedGVs; }; // Library to link. 
class DxilLib { public: DxilLib(std::unique_ptr<llvm::Module> pModule); virtual ~DxilLib() {} bool HasFunction(std::string &name); llvm::StringMap<std::unique_ptr<DxilFunctionLinkInfo>> &GetFunctionTable() { return m_functionNameMap; } bool IsInitFunc(llvm::Function *F); bool IsEntry(llvm::Function *F); bool IsResourceGlobal(const llvm::Constant *GV); DxilResourceBase *GetResource(const llvm::Constant *GV); DxilModule &GetDxilModule() { return m_DM; } void LazyLoadFunction(Function *F); void BuildGlobalUsage(); void CollectUsedInitFunctions(SetVector<StringRef> &addedFunctionSet, SmallVector<StringRef, 4> &workList); void FixIntrinsicOverloads(); private: std::unique_ptr<llvm::Module> m_pModule; DxilModule &m_DM; // Map from name to Link info for extern functions. llvm::StringMap<std::unique_ptr<DxilFunctionLinkInfo>> m_functionNameMap; llvm::SmallPtrSet<llvm::Function *, 4> m_entrySet; // Map from resource link global to resource. MapVector for deterministic // iteration. llvm::MapVector<const llvm::Constant *, DxilResourceBase *> m_resourceMap; // Set of initialize functions for global variable. SetVector for // deterministic iteration. llvm::SetVector<llvm::Function *> m_initFuncSet; }; struct DxilLinkJob; class DxilLinkerImpl : public hlsl::DxilLinker { public: DxilLinkerImpl(LLVMContext &Ctx, unsigned valMajor, unsigned valMinor) : DxilLinker(Ctx, valMajor, valMinor) {} virtual ~DxilLinkerImpl() {} bool HasLibNameRegistered(StringRef name) override; bool RegisterLib(StringRef name, std::unique_ptr<llvm::Module> pModule, std::unique_ptr<llvm::Module> pDebugModule) override; bool AttachLib(StringRef name) override; bool DetachLib(StringRef name) override; void DetachAll() override; std::unique_ptr<llvm::Module> Link(StringRef entry, StringRef profile, dxilutil::ExportMap &exportMap) override; private: bool AttachLib(DxilLib *lib); bool DetachLib(DxilLib *lib); bool AddFunctions(SmallVector<StringRef, 4> &workList, SetVector<DxilLib *> &libSet, SetVector<StringRef> &addedFunctionSet, DxilLinkJob &linkJob, bool bLazyLoadDone, bool bAllowFuncionDecls); // Attached libs to link. std::unordered_set<DxilLib *> m_attachedLibs; // Owner of all DxilLib. StringMap<std::unique_ptr<DxilLib>> m_LibMap; llvm::StringMap<std::pair<DxilFunctionLinkInfo *, DxilLib *>> m_functionNameMap; }; } // namespace //------------------------------------------------------------------------------ // // DxilFunctionLinkInfo methods. // DxilFunctionLinkInfo::DxilFunctionLinkInfo(Function *F) : func(F) { DXASSERT_NOMSG(F); } //------------------------------------------------------------------------------ // // DxilLib methods. // DxilLib::DxilLib(std::unique_ptr<llvm::Module> pModule) : m_pModule(std::move(pModule)), m_DM(m_pModule->GetOrCreateDxilModule()) { Module &M = *m_pModule; const std::string MID = (Twine(M.getModuleIdentifier()) + ".").str(); // Collect function defines. for (Function &F : M.functions()) { if (F.isDeclaration()) continue; if (F.getLinkage() == GlobalValue::LinkageTypes::InternalLinkage) { // Add prefix to internal function. F.setName(MID + F.getName()); } m_functionNameMap[F.getName()] = llvm::make_unique<DxilFunctionLinkInfo>(&F); if (m_DM.IsEntry(&F)) m_entrySet.insert(&F); } // Update internal global name. for (GlobalVariable &GV : M.globals()) { if (GV.getLinkage() == GlobalValue::LinkageTypes::InternalLinkage) { // Add prefix to internal global. 
GV.setName(MID + GV.getName()); } } } void DxilLib::FixIntrinsicOverloads() { // Fix DXIL overload name collisions that may be caused by name // collisions between dxil ops with different overload types, // when those types may have had the same name in the original // modules. m_DM.GetOP()->FixOverloadNames(); } void DxilLib::LazyLoadFunction(Function *F) { DXASSERT(m_functionNameMap.count(F->getName()), "else invalid Function"); DxilFunctionLinkInfo *linkInfo = m_functionNameMap[F->getName()].get(); std::error_code EC = F->materialize(); DXASSERT_LOCALVAR(EC, !EC, "else fail to materialize"); // Build used functions for F. for (auto &BB : F->getBasicBlockList()) { for (auto &I : BB.getInstList()) { if (CallInst *CI = dyn_cast<CallInst>(&I)) { linkInfo->usedFunctions.insert(CI->getCalledFunction()); } } } if (m_DM.HasDxilFunctionProps(F)) { DxilFunctionProps &props = m_DM.GetDxilFunctionProps(F); if (props.IsHS()) { // Add patch constant function to usedFunctions of entry. Function *patchConstantFunc = props.ShaderProps.HS.patchConstantFunc; linkInfo->usedFunctions.insert(patchConstantFunc); } } // Used globals will be built before linking. } void DxilLib::BuildGlobalUsage() { Module &M = *m_pModule; // Collect init functions for static globals. if (GlobalVariable *Ctors = M.getGlobalVariable("llvm.global_ctors")) { if (ConstantArray *CA = dyn_cast<ConstantArray>(Ctors->getInitializer())) { for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { if (isa<ConstantAggregateZero>(*i)) continue; ConstantStruct *CS = cast<ConstantStruct>(*i); if (isa<ConstantPointerNull>(CS->getOperand(1))) continue; // Must have a function or null ptr. if (!isa<Function>(CS->getOperand(1))) continue; Function *Ctor = cast<Function>(CS->getOperand(1)); assert(Ctor->getReturnType()->isVoidTy() && Ctor->arg_size() == 0 && "function type must be void (void)"); // Add Ctor. m_initFuncSet.insert(Ctor); LazyLoadFunction(Ctor); } } } // Build used globals. for (GlobalVariable &GV : M.globals()) { llvm::SetVector<Function *> funcSet; CollectUsedFunctions(&GV, funcSet); for (Function *F : funcSet) { DXASSERT(m_functionNameMap.count(F->getName()), "must exist in table"); DxilFunctionLinkInfo *linkInfo = m_functionNameMap[F->getName()].get(); linkInfo->usedGVs.insert(&GV); } } // Build resource map. AddResourceMap(m_DM.GetUAVs(), DXIL::ResourceClass::UAV, m_resourceMap, m_DM); AddResourceMap(m_DM.GetSRVs(), DXIL::ResourceClass::SRV, m_resourceMap, m_DM); AddResourceMap(m_DM.GetCBuffers(), DXIL::ResourceClass::CBuffer, m_resourceMap, m_DM); AddResourceMap(m_DM.GetSamplers(), DXIL::ResourceClass::Sampler, m_resourceMap, m_DM); } void DxilLib::CollectUsedInitFunctions(SetVector<StringRef> &addedFunctionSet, SmallVector<StringRef, 4> &workList) { // Add init functions to used functions. for (Function *Ctor : m_initFuncSet) { DXASSERT(m_functionNameMap.count(Ctor->getName()), "must exist in internal table"); DxilFunctionLinkInfo *linkInfo = m_functionNameMap[Ctor->getName()].get(); // If a function other than Ctor uses a GV of Ctor, // add Ctor to usedFunctions for it. for (GlobalVariable *GV : linkInfo->usedGVs) { llvm::SetVector<Function *> funcSet; CollectUsedFunctions(GV, funcSet); bool bAdded = false; for (Function *F : funcSet) { if (F == Ctor) continue; // If F is added for link, add init func to workList. 
if (addedFunctionSet.count(F->getName())) { workList.emplace_back(Ctor->getName()); bAdded = true; break; } } if (bAdded) break; } } } bool DxilLib::HasFunction(std::string &name) { return m_functionNameMap.count(name); } bool DxilLib::IsEntry(llvm::Function *F) { return m_entrySet.count(F); } bool DxilLib::IsInitFunc(llvm::Function *F) { return m_initFuncSet.count(F); } bool DxilLib::IsResourceGlobal(const llvm::Constant *GV) { return m_resourceMap.count(GV); } DxilResourceBase *DxilLib::GetResource(const llvm::Constant *GV) { if (IsResourceGlobal(GV)) return m_resourceMap[GV]; else return nullptr; } namespace { // Create module from link defines. struct DxilLinkJob { DxilLinkJob(LLVMContext &Ctx, dxilutil::ExportMap &exportMap, unsigned valMajor, unsigned valMinor) : m_ctx(Ctx), m_exportMap(exportMap), m_valMajor(valMajor), m_valMinor(valMinor) {} std::unique_ptr<llvm::Module> Link(std::pair<DxilFunctionLinkInfo *, DxilLib *> &entryLinkPair, const ShaderModel *pSM); std::unique_ptr<llvm::Module> LinkToLib(const ShaderModel *pSM); void StripDeadDebugInfo(llvm::Module &M); // Fix issues when link to different shader model. void FixShaderModelMismatch(llvm::Module &M); void RunPreparePass(llvm::Module &M); void AddFunction(std::pair<DxilFunctionLinkInfo *, DxilLib *> &linkPair); void AddFunction(llvm::Function *F); private: void LinkNamedMDNodes(Module *pM, ValueToValueMapTy &vmap); void AddFunctionDecls(Module *pM); bool AddGlobals(DxilModule &DM, ValueToValueMapTy &vmap); void EmitCtorListForLib(Module *pM); void CloneFunctions(ValueToValueMapTy &vmap); void AddFunctions(DxilModule &DM, ValueToValueMapTy &vmap); bool AddResource(DxilResourceBase *res, llvm::GlobalVariable *GV); void AddResourceToDM(DxilModule &DM); llvm::MapVector<DxilFunctionLinkInfo *, DxilLib *> m_functionDefs; // Function decls, in order added. llvm::MapVector<llvm::StringRef, std::pair<llvm::SmallPtrSet<llvm::FunctionType *, 2>, llvm::SmallVector<llvm::Function *, 2>>> m_functionDecls; // New created functions, in order added. llvm::MapVector<llvm::StringRef, llvm::Function *> m_newFunctions; // New created globals, in order added. llvm::MapVector<llvm::StringRef, llvm::GlobalVariable *> m_newGlobals; // Map for resource, ordered by name. std::map<llvm::StringRef, std::pair<DxilResourceBase *, llvm::GlobalVariable *>> m_resourceMap; LLVMContext &m_ctx; dxilutil::ExportMap &m_exportMap; unsigned m_valMajor, m_valMinor; }; } // namespace namespace { const char kUndefFunction[] = "Cannot find definition of function "; const char kRedefineFunction[] = "Definition already exists for function "; const char kRedefineGlobal[] = "Definition already exists for global variable "; const char kShaderKindMismatch[] = "Profile mismatch between entry function and target profile:"; const char kNoEntryProps[] = "Cannot find function property for entry function "; const char kRedefineResource[] = "Resource already exists as "; const char kInvalidValidatorVersion[] = "Validator version does not support target profile "; const char kExportNameCollision[] = "Export name collides with another export: "; const char kExportFunctionMissing[] = "Could not find target for export: "; const char kNoFunctionsToExport[] = "Library has no functions to export"; } // namespace //------------------------------------------------------------------------------ // // DxilLinkJob methods. // namespace { // Helper function to check type match. 
bool IsMatchedType(Type *Ty0, Type *Ty); StringRef RemoveNameSuffix(StringRef Name) { size_t DotPos = Name.rfind('.'); if (DotPos != StringRef::npos && Name.back() != '.' && isdigit(static_cast<unsigned char>(Name[DotPos + 1]))) Name = Name.substr(0, DotPos); return Name; } bool IsMatchedStructType(StructType *ST0, StructType *ST) { StringRef Name0 = RemoveNameSuffix(ST0->getName()); StringRef Name = RemoveNameSuffix(ST->getName()); if (Name0 != Name) return false; if (ST0->getNumElements() != ST->getNumElements()) return false; if (ST0->isLayoutIdentical(ST)) return true; for (unsigned i = 0; i < ST->getNumElements(); i++) { Type *Ty = ST->getElementType(i); Type *Ty0 = ST0->getElementType(i); if (!IsMatchedType(Ty, Ty0)) return false; } return true; } bool IsMatchedArrayType(ArrayType *AT0, ArrayType *AT) { if (AT0->getNumElements() != AT->getNumElements()) return false; return IsMatchedType(AT0->getElementType(), AT->getElementType()); } bool IsMatchedType(Type *Ty0, Type *Ty) { if (Ty0->isStructTy() && Ty->isStructTy()) { StructType *ST0 = cast<StructType>(Ty0); StructType *ST = cast<StructType>(Ty); return IsMatchedStructType(ST0, ST); } if (Ty0->isArrayTy() && Ty->isArrayTy()) { ArrayType *AT0 = cast<ArrayType>(Ty0); ArrayType *AT = cast<ArrayType>(Ty); return IsMatchedArrayType(AT0, AT); } if (Ty0->isPointerTy() && Ty->isPointerTy()) { if (Ty0->getPointerAddressSpace() != Ty->getPointerAddressSpace()) return false; return IsMatchedType(Ty0->getPointerElementType(), Ty->getPointerElementType()); } return Ty0 == Ty; } } // namespace bool DxilLinkJob::AddResource(DxilResourceBase *res, llvm::GlobalVariable *GV) { if (m_resourceMap.count(res->GetGlobalName())) { DxilResourceBase *res0 = m_resourceMap[res->GetGlobalName()].first; Type *Ty0 = res0->GetHLSLType()->getPointerElementType(); Type *Ty = res->GetHLSLType()->getPointerElementType(); // Make sure res0 match res. bool bMatch = IsMatchedType(Ty0, Ty); if (!bMatch) { // Report error. dxilutil::EmitErrorOnGlobalVariable( m_ctx, dyn_cast<GlobalVariable>(res->GetGlobalSymbol()), Twine(kRedefineResource) + res->GetResClassName() + " for " + res->GetGlobalName()); return false; } } else { m_resourceMap[res->GetGlobalName()] = std::make_pair(res, GV); } return true; } void DxilLinkJob::AddResourceToDM(DxilModule &DM) { for (auto &it : m_resourceMap) { DxilResourceBase *res = it.second.first; GlobalVariable *GV = it.second.second; unsigned ID = 0; DxilResourceBase *basePtr = nullptr; switch (res->GetClass()) { case DXIL::ResourceClass::UAV: { std::unique_ptr<DxilResource> pUAV = llvm::make_unique<DxilResource>(); DxilResource *ptr = pUAV.get(); // Copy the content. *ptr = *(static_cast<DxilResource *>(res)); ID = DM.AddUAV(std::move(pUAV)); basePtr = &DM.GetUAV(ID); } break; case DXIL::ResourceClass::SRV: { std::unique_ptr<DxilResource> pSRV = llvm::make_unique<DxilResource>(); DxilResource *ptr = pSRV.get(); // Copy the content. *ptr = *(static_cast<DxilResource *>(res)); ID = DM.AddSRV(std::move(pSRV)); basePtr = &DM.GetSRV(ID); } break; case DXIL::ResourceClass::CBuffer: { std::unique_ptr<DxilCBuffer> pCBuf = llvm::make_unique<DxilCBuffer>(); DxilCBuffer *ptr = pCBuf.get(); // Copy the content. *ptr = *(static_cast<DxilCBuffer *>(res)); ID = DM.AddCBuffer(std::move(pCBuf)); basePtr = &DM.GetCBuffer(ID); } break; case DXIL::ResourceClass::Sampler: { std::unique_ptr<DxilSampler> pSampler = llvm::make_unique<DxilSampler>(); DxilSampler *ptr = pSampler.get(); // Copy the content. 
*ptr = *(static_cast<DxilSampler *>(res)); ID = DM.AddSampler(std::move(pSampler)); basePtr = &DM.GetSampler(ID); } break; default: DXASSERT(false, "Invalid resource class"); break; } // Update ID. basePtr->SetID(ID); basePtr->SetGlobalSymbol(GV); DM.GetLLVMUsed().push_back(GV); } // Prevent global vars used for resources from being deleted through // optimizations while we still have hidden uses (pointers in resource // vectors). DM.EmitLLVMUsed(); } void DxilLinkJob::LinkNamedMDNodes(Module *pM, ValueToValueMapTy &vmap) { SetVector<Module *> moduleSet; for (auto &it : m_functionDefs) { DxilLib *pLib = it.second; moduleSet.insert(pLib->GetDxilModule().GetModule()); } // Link normal NamedMDNode. // TODO: skip duplicate operands. for (Module *pSrcM : moduleSet) { const NamedMDNode *pSrcModFlags = pSrcM->getModuleFlagsMetadata(); for (const NamedMDNode &NMD : pSrcM->named_metadata()) { // Don't link module flags here. Do them separately. if (&NMD == pSrcModFlags) continue; // Skip dxil metadata which will be regenerated. if (DxilMDHelper::IsKnownNamedMetaData(NMD)) continue; NamedMDNode *DestNMD = pM->getOrInsertNamedMetadata(NMD.getName()); // Add Src elements into Dest node. for (const MDNode *op : NMD.operands()) DestNMD->addOperand(MapMetadata(op, vmap, RF_None, /*TypeMap*/ nullptr, /*ValMaterializer*/ nullptr)); } } // Link mod flags. SetVector<MDNode *> flagSet; for (Module *pSrcM : moduleSet) { NamedMDNode *pSrcModFlags = pSrcM->getModuleFlagsMetadata(); if (pSrcModFlags) { for (MDNode *flag : pSrcModFlags->operands()) { flagSet.insert(flag); } } } // TODO: check conflict in flags. if (!flagSet.empty()) { NamedMDNode *ModFlags = pM->getOrInsertModuleFlagsMetadata(); for (MDNode *flag : flagSet) { ModFlags->addOperand(flag); } } } void DxilLinkJob::AddFunctionDecls(Module *pM) { for (auto &it : m_functionDecls) { for (auto F : it.second.second) { Function *NewF = pM->getFunction(F->getName()); if (!NewF || F->getFunctionType() != NewF->getFunctionType()) { NewF = Function::Create(F->getFunctionType(), F->getLinkage(), F->getName(), pM); NewF->setAttributes(F->getAttributes()); } m_newFunctions[F->getName()] = NewF; } } } bool DxilLinkJob::AddGlobals(DxilModule &DM, ValueToValueMapTy &vmap) { DxilTypeSystem &typeSys = DM.GetTypeSystem(); Module *pM = DM.GetModule(); bool bSuccess = true; for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; DxilLib *pLib = it.second; DxilModule &tmpDM = pLib->GetDxilModule(); DxilTypeSystem &tmpTypeSys = tmpDM.GetTypeSystem(); for (GlobalVariable *GV : linkInfo->usedGVs) { // Skip added globals. if (m_newGlobals.count(GV->getName())) { if (vmap.find(GV) == vmap.end()) { if (DxilResourceBase *res = pLib->GetResource(GV)) { // For resource of same name, if class and type match, just map to // same NewGV. GlobalVariable *NewGV = m_newGlobals[GV->getName()]; if (AddResource(res, NewGV)) { vmap[GV] = NewGV; } else { bSuccess = false; } continue; } // Redefine of global. 
dxilutil::EmitErrorOnGlobalVariable( m_ctx, GV, Twine(kRedefineGlobal) + GV->getName()); bSuccess = false; } continue; } Constant *Initializer = nullptr; if (GV->hasInitializer()) Initializer = GV->getInitializer(); Type *Ty = GV->getType()->getElementType(); GlobalVariable *NewGV = new GlobalVariable( *pM, Ty, GV->isConstant(), GV->getLinkage(), Initializer, GV->getName(), /*InsertBefore*/ nullptr, GV->getThreadLocalMode(), GV->getType()->getAddressSpace(), GV->isExternallyInitialized()); m_newGlobals[GV->getName()] = NewGV; vmap[GV] = NewGV; typeSys.CopyTypeAnnotation(Ty, tmpTypeSys); if (DxilResourceBase *res = pLib->GetResource(GV)) { bSuccess &= AddResource(res, NewGV); typeSys.CopyTypeAnnotation(res->GetHLSLType(), tmpTypeSys); } } } return bSuccess; } void DxilLinkJob::CloneFunctions(ValueToValueMapTy &vmap) { for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; Function *F = linkInfo->func; Function *NewF = m_newFunctions[F->getName()]; // Add dxil functions to vmap. for (Function *UsedF : linkInfo->usedFunctions) { if (!vmap.count(UsedF)) { // Extern function need match by name DXASSERT(m_newFunctions.count(UsedF->getName()), "Must have new function."); vmap[UsedF] = m_newFunctions[UsedF->getName()]; } } CloneFunction(F, NewF, vmap); } } void DxilLinkJob::AddFunctions(DxilModule &DM, ValueToValueMapTy &vmap) { DxilTypeSystem &typeSys = DM.GetTypeSystem(); Module *pM = DM.GetModule(); for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; DxilLib *pLib = it.second; DxilModule &tmpDM = pLib->GetDxilModule(); DxilTypeSystem &tmpTypeSys = tmpDM.GetTypeSystem(); Function *F = linkInfo->func; Function *NewF = Function::Create(F->getFunctionType(), F->getLinkage(), F->getName(), pM); NewF->setAttributes(F->getAttributes()); if (!NewF->hasFnAttribute(llvm::Attribute::NoInline)) NewF->addFnAttr(llvm::Attribute::AlwaysInline); if (DxilFunctionAnnotation *funcAnnotation = tmpTypeSys.GetFunctionAnnotation(F)) { // Clone funcAnnotation to typeSys. typeSys.CopyFunctionAnnotation(NewF, F, tmpTypeSys); } // Add to function map. m_newFunctions[NewF->getName()] = NewF; vmap[F] = NewF; } } std::unique_ptr<Module> DxilLinkJob::Link(std::pair<DxilFunctionLinkInfo *, DxilLib *> &entryLinkPair, const ShaderModel *pSM) { Function *entryFunc = entryLinkPair.first->func; DxilModule &entryDM = entryLinkPair.second->GetDxilModule(); if (!entryDM.HasDxilFunctionProps(entryFunc)) { // Cannot get function props. dxilutil::EmitErrorOnFunction(m_ctx, entryFunc, Twine(kNoEntryProps) + entryFunc->getName()); return nullptr; } DxilFunctionProps props = entryDM.GetDxilFunctionProps(entryFunc); if (pSM->GetKind() != props.shaderKind) { // Shader kind mismatch. dxilutil::EmitErrorOnFunction( m_ctx, entryFunc, Twine(kShaderKindMismatch) + ShaderModel::GetKindName(pSM->GetKind()) + " and " + ShaderModel::GetKindName(props.shaderKind)); return nullptr; } // Create new module. std::unique_ptr<Module> pM = llvm::make_unique<Module>(entryFunc->getName(), entryDM.GetCtx()); // Set target. pM->setTargetTriple(entryDM.GetModule()->getTargetTriple()); // Add dxil operation functions before create DxilModule. AddFunctionDecls(pM.get()); // Create DxilModule. const bool bSkipInit = true; DxilModule &DM = pM->GetOrCreateDxilModule(bSkipInit); DM.SetShaderModel(pSM, entryDM.GetUseMinPrecision()); // Set Validator version. 
DM.SetValidatorVersion(m_valMajor, m_valMinor); ValueToValueMapTy vmap; // Add functions. AddFunctions(DM, vmap); // Set entry. Function *NewEntryFunc = m_newFunctions[entryFunc->getName()]; DM.SetEntryFunction(NewEntryFunc); DM.SetEntryFunctionName(entryFunc->getName()); DxilEntryPropsMap EntryPropMap; std::unique_ptr<DxilEntryProps> pProps = llvm::make_unique<DxilEntryProps>(entryDM.GetDxilEntryProps(entryFunc)); EntryPropMap[NewEntryFunc] = std::move(pProps); DM.ResetEntryPropsMap(std::move(EntryPropMap)); if (NewEntryFunc->hasFnAttribute(llvm::Attribute::AlwaysInline)) NewEntryFunc->removeFnAttr(llvm::Attribute::AlwaysInline); if (props.IsHS()) { Function *patchConstantFunc = props.ShaderProps.HS.patchConstantFunc; Function *newPatchConstantFunc = m_newFunctions[patchConstantFunc->getName()]; props.ShaderProps.HS.patchConstantFunc = newPatchConstantFunc; if (newPatchConstantFunc->hasFnAttribute(llvm::Attribute::AlwaysInline)) newPatchConstantFunc->removeFnAttr(llvm::Attribute::AlwaysInline); } // Set root sig if it exists. if (!props.serializedRootSignature.empty()) { DM.ResetSerializedRootSignature(props.serializedRootSignature); props.serializedRootSignature.clear(); } // Set EntryProps DM.SetShaderProperties(&props); // Add globals. bool bSuccess = AddGlobals(DM, vmap); if (!bSuccess) return nullptr; // Clone functions. CloneFunctions(vmap); // Call global constructors. IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(DM.GetEntryFunction())); for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; DxilLib *pLib = it.second; // Skip constructors in the entry lib; they are already called for entries // inside the entry lib. if (pLib == entryLinkPair.second) continue; Function *F = linkInfo->func; if (pLib->IsInitFunc(F)) { Function *NewF = m_newFunctions[F->getName()]; Builder.CreateCall(NewF); } } // Refresh intrinsic cache. DM.GetOP()->RefreshCache(); // Add resources to DM. // This should be done after functions are cloned. AddResourceToDM(DM); // Link metadata like debug info. LinkNamedMDNodes(pM.get(), vmap); RunPreparePass(*pM); return pM; } // Based on CodeGenModule::EmitCtorList. void DxilLinkJob::EmitCtorListForLib(Module *pM) { LLVMContext &Ctx = pM->getContext(); Type *VoidTy = Type::getVoidTy(Ctx); Type *Int32Ty = Type::getInt32Ty(Ctx); Type *VoidPtrTy = Type::getInt8PtrTy(Ctx); // Ctor function type is void()*. llvm::FunctionType *CtorFTy = llvm::FunctionType::get(VoidTy, false); llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy); // Get the type of a ctor entry, { i32, void ()*, i8* }. llvm::StructType *CtorStructTy = llvm::StructType::get( Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr); // Construct the constructor and destructor arrays. 
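// For reference, the global built below has the standard ctor-list layout;
// an illustrative result with a single init function (the name @lib.init is
// hypothetical, not produced by this file) would be:
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//       [{ i32, void ()*, i8* } { i32 65535, void ()* @lib.init, i8* null }]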
SmallVector<llvm::Constant *, 8> Ctors; for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; DxilLib *pLib = it.second; Function *F = linkInfo->func; if (pLib->IsInitFunc(F)) { Function *NewF = m_newFunctions[F->getName()]; llvm::Constant *S[] = {llvm::ConstantInt::get(Int32Ty, 65535, false), llvm::ConstantExpr::getBitCast(NewF, CtorPFTy), (llvm::Constant::getNullValue(VoidPtrTy))}; Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S)); } } if (!Ctors.empty()) { const StringRef GlobalName = "llvm.global_ctors"; llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size()); new llvm::GlobalVariable(*pM, AT, false, llvm::GlobalValue::AppendingLinkage, llvm::ConstantArray::get(AT, Ctors), GlobalName); } } std::unique_ptr<Module> DxilLinkJob::LinkToLib(const ShaderModel *pSM) { if (m_functionDefs.empty()) { dxilutil::EmitErrorOnContext(m_ctx, Twine(kNoFunctionsToExport)); return nullptr; } DxilLib *pLib = m_functionDefs.begin()->second; DxilModule &tmpDM = pLib->GetDxilModule(); // Create new module. std::unique_ptr<Module> pM = llvm::make_unique<Module>("merged_lib", tmpDM.GetCtx()); // Set target. pM->setTargetTriple(tmpDM.GetModule()->getTargetTriple()); // Add dxil operation functions and external decls before create DxilModule. AddFunctionDecls(pM.get()); // Create DxilModule. const bool bSkipInit = true; DxilModule &DM = pM->GetOrCreateDxilModule(bSkipInit); DM.SetShaderModel(pSM, tmpDM.GetUseMinPrecision()); // Set Validator version. DM.SetValidatorVersion(m_valMajor, m_valMinor); ValueToValueMapTy vmap; // Add function AddFunctions(DM, vmap); // Set DxilFunctionProps. DxilEntryPropsMap EntryPropMap; for (auto &it : m_functionDefs) { DxilFunctionLinkInfo *linkInfo = it.first; DxilLib *pLib = it.second; DxilModule &tmpDM = pLib->GetDxilModule(); Function *F = linkInfo->func; if (tmpDM.HasDxilEntryProps(F)) { Function *NewF = m_newFunctions[F->getName()]; DxilEntryProps &props = tmpDM.GetDxilEntryProps(F); std::unique_ptr<DxilEntryProps> pProps = llvm::make_unique<DxilEntryProps>(props); EntryPropMap[NewF] = std::move(pProps); } } DM.ResetEntryPropsMap(std::move(EntryPropMap)); // Add global bool bSuccess = AddGlobals(DM, vmap); if (!bSuccess) return nullptr; // Clone functions. CloneFunctions(vmap); // Refresh intrinsic cache. DM.GetOP()->RefreshCache(); // Add resource to DM. // This should be after functions cloned. AddResourceToDM(DM); // Link metadata like debug info. LinkNamedMDNodes(pM.get(), vmap); // Build global.ctors. EmitCtorListForLib(pM.get()); RunPreparePass(*pM); if (!m_exportMap.empty()) { m_exportMap.BeginProcessing(); DM.ClearDxilMetadata(*pM); for (auto it = pM->begin(); it != pM->end();) { Function *F = it++; if (F->isDeclaration()) continue; if (!m_exportMap.ProcessFunction(F, true)) { // Remove Function not in exportMap. DM.RemoveFunction(F); // Only erase function if user is empty. 
// The function can still be used by @llvm.global_ctors. if (F->user_empty()) F->eraseFromParent(); } } if (!m_exportMap.EndProcessing()) { for (auto &name : m_exportMap.GetNameCollisions()) { std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(name, os); dxilutil::EmitErrorOnContext(m_ctx, Twine(kExportNameCollision) + os.str()); } for (auto &name : m_exportMap.GetUnusedExports()) { std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(name, os); dxilutil::EmitErrorOnContext(m_ctx, Twine(kExportFunctionMissing) + os.str()); } return nullptr; } // Rename the original, if necessary, then clone the rest. for (auto &it : m_exportMap.GetFunctionRenames()) { Function *F = it.first; auto &renames = it.second; if (renames.empty()) continue; auto itName = renames.begin(); // Rename the original, if necessary, then clone the rest. if (renames.find(F->getName()) == renames.end()) F->setName(*(itName++)); while (itName != renames.end()) { if (F->getName() != *itName) { Function *NewF = Function::Create( F->getFunctionType(), GlobalValue::LinkageTypes::ExternalLinkage, *itName, DM.GetModule()); ValueToValueMapTy vmap; CloneFunction(F, NewF, vmap, &DM.GetTypeSystem()); // Add DxilFunctionProps if entry. if (DM.HasDxilFunctionProps(F)) { DM.CloneDxilEntryProps(F, NewF); } } itName++; } } DM.EmitDxilMetadata(); } return pM; } void DxilLinkJob::AddFunction( std::pair<DxilFunctionLinkInfo *, DxilLib *> &linkPair) { m_functionDefs[linkPair.first] = linkPair.second; } void DxilLinkJob::AddFunction(llvm::Function *F) { // Rarely, DXIL op overloads could collide, due to different types with the // same name. Later, we will rename these functions, but for now, we need to // prevent clobbering an existing entry. auto &entry = m_functionDecls[F->getName()]; if (entry.first.insert(F->getFunctionType()).second) entry.second.push_back(F); } // Clone of StripDeadDebugInfo::runOnModule. // Also removes functions which are not in the current Module. void DxilLinkJob::StripDeadDebugInfo(Module &M) { LLVMContext &C = M.getContext(); // Find all debug info in F. This is actually overkill in terms of what we // want to do, but we want to try and be as resilient as possible in the face // of potential debug info changes by using the formal interfaces given to us // as much as possible. DebugInfoFinder F; F.processModule(M); // For each compile unit, find the live set of global variables/functions and // replace the current list of potentially dead global variables/functions // with the live list. SmallVector<Metadata *, 64> LiveGlobalVariables; SmallVector<Metadata *, 64> LiveSubprograms; DenseSet<const MDNode *> VisitedSet; for (DICompileUnit *DIC : F.compile_units()) { // Create our live subprogram list. bool SubprogramChange = false; for (DISubprogram *DISP : DIC->getSubprograms()) { // Make sure we visit each subprogram only once. if (!VisitedSet.insert(DISP).second) continue; // If the function referenced by DISP is not null, the function is live. if (Function *Func = DISP->getFunction()) { LiveSubprograms.push_back(DISP); if (Func->getParent() != &M) DISP->replaceFunction(nullptr); } else { // Copy it in anyway even if there's no function. When a function is // inlined, the function reference is gone, but the subprogram is still // valid as a scope. LiveSubprograms.push_back(DISP); } } // Create our live global variable list. 
bool GlobalVariableChange = false; for (DIGlobalVariable *DIG : DIC->getGlobalVariables()) { // Make sure we visit each global variable only once. if (!VisitedSet.insert(DIG).second) continue; // If the global variable referenced by DIG is not null, the global // variable is live. if (Constant *CV = DIG->getVariable()) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CV)) { if (GV->getParent() == &M) { LiveGlobalVariables.push_back(DIG); } else { GlobalVariableChange = true; } } else { LiveGlobalVariables.push_back(DIG); } } else { GlobalVariableChange = true; } } // If we found dead subprograms or global variables, replace the current // subprogram list/global variable list with our new live subprogram/global // variable list. if (SubprogramChange) { DIC->replaceSubprograms(MDTuple::get(C, LiveSubprograms)); } if (GlobalVariableChange) { DIC->replaceGlobalVariables(MDTuple::get(C, LiveGlobalVariables)); } // Reset lists for the next iteration. LiveSubprograms.clear(); LiveGlobalVariables.clear(); } } // TODO: move FixShaderModelMismatch to a separate file. #include "dxc/DXIL/DxilInstructions.h" namespace { bool onlyUsedByAnnotateHandle(Value *V) { bool bResult = true; for (User *U : V->users()) { CallInst *CI = dyn_cast<CallInst>(U); if (!CI) { bResult = false; break; } DxilInst_AnnotateHandle Hdl(CI); if (!Hdl) { bResult = false; break; } } return bResult; } DxilResourceBase * findResourceFromPtr(Value *Ptr, DxilModule &DM, DenseMap<Value *, DxilResourceBase *> &PtrResMap) { auto it = PtrResMap.find(Ptr); if (it != PtrResMap.end()) return it->second; DxilResourceBase *Res = nullptr; if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { DXASSERT(false, "global resource should already be in map"); } else { // AllocaInst of resource is not supported when annotateHandle is missing. GEPOperator *GEP = cast<GEPOperator>(Ptr); Res = findResourceFromPtr(GEP->getPointerOperand(), DM, PtrResMap); } PtrResMap[Ptr] = Res; return Res; } template <typename T> void addGVFromResTable(T &Tab, DenseMap<Value *, DxilResourceBase *> &PtrResMap) { for (auto &it : Tab) { PtrResMap[it->GetGlobalSymbol()] = it.get(); } } // Make sure createHandleForLib is annotated before use. bool addAnnotHandle(Module &M, DxilModule &DM) { hlsl::OP *hlslOP = DM.GetOP(); auto *pSM = DM.GetShaderModel(); if (!pSM->IsSM66Plus()) return false; // If no createHandleForLib, do nothing. if (!hlslOP->IsDxilOpUsed(DXIL::OpCode::CreateHandleForLib)) return false; Type *pVoidTy = Type::getVoidTy(M.getContext()); SmallVector<CallInst *, 4> Candidates; for (Function &F : M) { if (!F.isDeclaration()) continue; if (!hlslOP->IsDxilOpFunc(&F)) continue; DXIL::OpCodeClass opClass; if (!hlslOP->GetOpCodeClass(&F, opClass)) continue; if (opClass != DXIL::OpCodeClass::CreateHandleForLib) continue; for (User *U : F.users()) { CallInst *CI = cast<CallInst>(U); // Skip calls whose uses are already all annotateHandle. if (onlyUsedByAnnotateHandle(CI)) continue; Candidates.emplace_back(CI); } } if (Candidates.empty()) return false; DenseMap<Value *, DxilResourceBase *> PtrResMap; // Add GVs from the resource tables first. addGVFromResTable(DM.GetCBuffers(), PtrResMap); addGVFromResTable(DM.GetSRVs(), PtrResMap); addGVFromResTable(DM.GetUAVs(), PtrResMap); addGVFromResTable(DM.GetSamplers(), PtrResMap); Function *annotHandleFn = hlslOP->GetOpFunc(DXIL::OpCode::AnnotateHandle, pVoidTy); Value *annotHandleArg = hlslOP->GetI32Const((unsigned)DXIL::OpCode::AnnotateHandle); // Annotate each unannotated createHandleForLib call with annotateHandle. 
Type *resPropertyTy = hlslOP->GetResourcePropertiesType(); for (CallInst *CI : Candidates) { DxilInst_CreateHandleForLib Hdl(CI); LoadInst *Ld = cast<LoadInst>(Hdl.get_Resource()); Value *Ptr = Ld->getPointerOperand(); DxilResourceBase *Res = findResourceFromPtr(Ptr, DM, PtrResMap); DXASSERT(Res, "fail to find resource when missing annotateHandle"); DxilResourceProperties RP = resource_helper::loadPropsFromResourceBase(Res); Value *propertiesV = resource_helper::getAsConstant(RP, resPropertyTy, *DM.GetShaderModel()); IRBuilder<> B(CI->getNextNode()); CallInst *annotHdl = B.CreateCall(annotHandleFn, {annotHandleArg, CI, propertiesV}); CI->replaceAllUsesWith(annotHdl); annotHdl->setArgOperand(DxilInst_AnnotateHandle::arg_res, CI); } return true; } } // namespace void DxilLinkJob::FixShaderModelMismatch(llvm::Module &M) { // TODO: fix more issues. addAnnotHandle(M, M.GetDxilModule()); } void DxilLinkJob::RunPreparePass(Module &M) { StripDeadDebugInfo(M); FixShaderModelMismatch(M); DxilModule &DM = M.GetDxilModule(); const ShaderModel *pSM = DM.GetShaderModel(); legacy::PassManager PM; PM.add(createDxilReinsertNopsPass()); PM.add(createAlwaysInlinerPass(/*InsertLifeTime*/ false)); // Remove unused functions. PM.add(createDxilDeadFunctionEliminationPass()); // SROA PM.add(createSROAPass(/*RequiresDomTree*/ false, /*SkipHLSLMat*/ false)); // For static global handle. PM.add(createLowerStaticGlobalIntoAlloca()); // Remove MultiDimArray from function call arg. PM.add(createMultiDimArrayToOneDimArrayPass()); // Lower matrix bitcast. PM.add(createMatrixBitcastLowerPass()); // mem2reg. PM.add(createPromoteMemoryToRegisterPass()); // Clean up vectors, and run mem2reg again PM.add(createScalarizerPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createSimplifyInstPass()); PM.add(createCFGSimplificationPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createGlobalDCEPass()); if (pSM->IsSM66Plus() && pSM->IsLib()) PM.add(createDxilMutateResourceToHandlePass()); PM.add(createDxilCleanupDynamicResourceHandlePass()); PM.add(createDxilLowerCreateHandleForLibPass()); PM.add(createDxilTranslateRawBuffer()); PM.add(createDxilFinalizeModulePass()); PM.add(createComputeViewIdStatePass()); PM.add(createDxilDeadFunctionEliminationPass()); PM.add(createNoPausePassesPass()); PM.add(createDxilEmitMetadataPass()); PM.add(createDxilFinalizePreservesPass()); PM.run(M); } //------------------------------------------------------------------------------ // // DxilLinkerImpl methods. // bool DxilLinkerImpl::HasLibNameRegistered(StringRef name) { return m_LibMap.count(name); } bool DxilLinkerImpl::RegisterLib(StringRef name, std::unique_ptr<llvm::Module> pModule, std::unique_ptr<llvm::Module> pDebugModule) { if (m_LibMap.count(name)) return false; std::unique_ptr<llvm::Module> pM = pDebugModule ? std::move(pDebugModule) : std::move(pModule); if (!pM) return false; pM->setModuleIdentifier(name); std::unique_ptr<DxilLib> pLib = llvm::make_unique<DxilLib>(std::move(pM)); m_LibMap[name] = std::move(pLib); return true; } bool DxilLinkerImpl::AttachLib(StringRef name) { auto iter = m_LibMap.find(name); if (iter == m_LibMap.end()) { return false; } return AttachLib(iter->second.get()); } bool DxilLinkerImpl::DetachLib(StringRef name) { auto iter = m_LibMap.find(name); if (iter == m_LibMap.end()) { return false; } return DetachLib(iter->second.get()); } void DxilLinkerImpl::DetachAll() { m_functionNameMap.clear(); m_attachedLibs.clear(); } bool DxilLinkerImpl::AttachLib(DxilLib *lib) { if (!lib) { // Invalid arg. 
return false; } if (m_attachedLibs.count(lib)) return false; StringMap<std::unique_ptr<DxilFunctionLinkInfo>> &funcTable = lib->GetFunctionTable(); bool bSuccess = true; for (auto it = funcTable.begin(), e = funcTable.end(); it != e; it++) { StringRef name = it->getKey(); if (m_functionNameMap.count(name)) { // Redefine of function. const DxilFunctionLinkInfo *DFLI = it->getValue().get(); dxilutil::EmitErrorOnFunction(m_ctx, DFLI->func, Twine(kRedefineFunction) + name); bSuccess = false; continue; } m_functionNameMap[name] = std::make_pair(it->second.get(), lib); } if (bSuccess) { m_attachedLibs.insert(lib); } else { for (auto it = funcTable.begin(), e = funcTable.end(); it != e; it++) { StringRef name = it->getKey(); auto iter = m_functionNameMap.find(name); if (iter == m_functionNameMap.end()) continue; // Remove functions of lib. if (m_functionNameMap[name].second == lib) m_functionNameMap.erase(name); } } return bSuccess; } bool DxilLinkerImpl::DetachLib(DxilLib *lib) { if (!lib) { // Invalid arg. return false; } if (!m_attachedLibs.count(lib)) return false; m_attachedLibs.erase(lib); // Remove functions from lib. StringMap<std::unique_ptr<DxilFunctionLinkInfo>> &funcTable = lib->GetFunctionTable(); for (auto it = funcTable.begin(), e = funcTable.end(); it != e; it++) { StringRef name = it->getKey(); m_functionNameMap.erase(name); } return true; } bool DxilLinkerImpl::AddFunctions(SmallVector<StringRef, 4> &workList, SetVector<DxilLib *> &libSet, SetVector<StringRef> &addedFunctionSet, DxilLinkJob &linkJob, bool bLazyLoadDone, bool bAllowFuncionDecls) { while (!workList.empty()) { StringRef name = workList.pop_back_val(); // Ignore added function. if (addedFunctionSet.count(name)) continue; if (!m_functionNameMap.count(name)) { // Cannot find function, report error. dxilutil::EmitErrorOnContext(m_ctx, Twine(kUndefFunction) + name); return false; } std::pair<DxilFunctionLinkInfo *, DxilLib *> &linkPair = m_functionNameMap[name]; linkJob.AddFunction(linkPair); DxilLib *pLib = linkPair.second; libSet.insert(pLib); if (!bLazyLoadDone) { Function *F = linkPair.first->func; pLib->LazyLoadFunction(F); } for (Function *F : linkPair.first->usedFunctions) { if (hlsl::OP::IsDxilOpFunc(F) || F->isIntrinsic()) { // Add dxil operations directly. linkJob.AddFunction(F); } else if (addedFunctionSet.count(F->getName()) == 0) { if (bAllowFuncionDecls && F->isDeclaration() && !m_functionNameMap.count(F->getName())) { // When linking to lib, use of undefined function is allowed; add // directly. linkJob.AddFunction(F); } else { // Push function name to work list. 
workList.emplace_back(F->getName()); } } } addedFunctionSet.insert(name); } return true; } std::unique_ptr<llvm::Module> DxilLinkerImpl::Link(StringRef entry, StringRef profile, dxilutil::ExportMap &exportMap) { const ShaderModel *pSM = ShaderModel::GetByName(profile.data()); DXIL::ShaderKind kind = pSM->GetKind(); if (kind == DXIL::ShaderKind::Invalid || (kind >= DXIL::ShaderKind::RayGeneration && kind <= DXIL::ShaderKind::Callable)) { // Invalid profile, user error emitted earlier with option check llvm_unreachable("invalid profile kind to link"); return nullptr; } if (!exportMap.empty() && kind != DXIL::ShaderKind::Library) { llvm_unreachable("export map is only for library"); return nullptr; } // Verifying validator version supports the requested profile unsigned minValMajor, minValMinor; pSM->GetMinValidatorVersion(minValMajor, minValMinor); if (minValMajor > m_valMajor || (minValMajor == m_valMajor && minValMinor > m_valMinor)) { dxilutil::EmitErrorOnContext(m_ctx, Twine(kInvalidValidatorVersion) + profile); return nullptr; } DxilLinkJob linkJob(m_ctx, exportMap, m_valMajor, m_valMinor); SetVector<DxilLib *> libSet; SetVector<StringRef> addedFunctionSet; bool bIsLib = pSM->IsLib(); if (!bIsLib) { SmallVector<StringRef, 4> workList; workList.emplace_back(entry); if (!AddFunctions(workList, libSet, addedFunctionSet, linkJob, /*bLazyLoadDone*/ false, /*bAllowFuncionDecls*/ false)) return nullptr; } else { if (exportMap.empty() && !exportMap.isExportShadersOnly()) { // Add every function for lib profile. for (auto &it : m_functionNameMap) { StringRef name = it.getKey(); std::pair<DxilFunctionLinkInfo *, DxilLib *> &linkPair = it.second; DxilFunctionLinkInfo *linkInfo = linkPair.first; DxilLib *pLib = linkPair.second; Function *F = linkInfo->func; pLib->LazyLoadFunction(F); linkJob.AddFunction(linkPair); libSet.insert(pLib); addedFunctionSet.insert(name); } // Add every dxil function and llvm intrinsic. for (auto *pLib : libSet) { auto &DM = pLib->GetDxilModule(); DM.GetOP(); auto *pM = DM.GetModule(); for (Function &F : pM->functions()) { if (hlsl::OP::IsDxilOpFunc(&F) || F.isIntrinsic() || (F.isDeclaration() && m_functionNameMap.count(F.getName()) == 0)) { // Add intrinsics and function decls still not defined in any lib linkJob.AddFunction(&F); } } } } else if (exportMap.isExportShadersOnly()) { SmallVector<StringRef, 4> workList; for (auto *pLib : m_attachedLibs) { auto &DM = pLib->GetDxilModule(); auto *pM = DM.GetModule(); for (Function &F : pM->functions()) { if (!pLib->IsEntry(&F)) { if (!F.isDeclaration()) { // Set none entry to be internal so they could be removed. F.setLinkage(GlobalValue::LinkageTypes::InternalLinkage); } continue; } workList.emplace_back(F.getName()); } libSet.insert(pLib); } if (!AddFunctions(workList, libSet, addedFunctionSet, linkJob, /*bLazyLoadDone*/ false, /*bAllowFuncionDecls*/ false)) return nullptr; } else { SmallVector<StringRef, 4> workList; // Only add exported functions. for (auto &it : m_functionNameMap) { StringRef name = it.getKey(); // Only add names exist in exportMap. if (exportMap.IsExported(name)) workList.emplace_back(name); } if (!AddFunctions(workList, libSet, addedFunctionSet, linkJob, /*bLazyLoadDone*/ false, /*bAllowFuncionDecls*/ true)) return nullptr; } } // Save global users. for (auto &pLib : libSet) { pLib->BuildGlobalUsage(); } SmallVector<StringRef, 4> workList; // Save global ctor users. 
for (auto &pLib : libSet) { pLib->CollectUsedInitFunctions(addedFunctionSet, workList); } for (auto &pLib : libSet) { pLib->FixIntrinsicOverloads(); } // Add init functions if used. // All init functions were already loaded in BuildGlobalUsage, // so set bLazyLoadDone to true here. // Decls should have been added to addedFunctionSet for libs, // so bAllowFuncionDecls is false here. if (!AddFunctions(workList, libSet, addedFunctionSet, linkJob, /*bLazyLoadDone*/ true, /*bAllowFuncionDecls*/ false)) return nullptr; if (!bIsLib) { std::pair<DxilFunctionLinkInfo *, DxilLib *> &entryLinkPair = m_functionNameMap[entry]; return linkJob.Link(entryLinkPair, pSM); } else { return linkJob.LinkToLib(pSM); } } namespace hlsl { DxilLinker *DxilLinker::CreateLinker(LLVMContext &Ctx, unsigned valMajor, unsigned valMinor) { return new DxilLinkerImpl(Ctx, valMajor, valMinor); } } // namespace hlsl
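// Minimal usage sketch (illustrative; the library name, profile string, and
// validator version are assumptions, and error handling is omitted):
//   std::unique_ptr<hlsl::DxilLinker> pLinker(
//       hlsl::DxilLinker::CreateLinker(Ctx, /*valMajor*/ 1, /*valMinor*/ 6));
//   pLinker->RegisterLib("libA", std::move(pModuleA), /*pDebugModule*/ nullptr);
//   pLinker->AttachLib("libA");
//   hlsl::dxilutil::ExportMap exportMap;
//   std::unique_ptr<llvm::Module> pLinked =
//       pLinker->Link(/*entry*/ "", /*profile*/ "lib_6_3", exportMap);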
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/WaveSensitivityAnalysis.cpp
/////////////////////////////////////////////////////////////////////////////// // // // WaveSensitivityAnalysis.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // This file provides support for doing analyses that are aware of wave // // intrinsics. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" using namespace llvm; using std::map; namespace hlsl { // WaveSensitivityAnalysis is created to validate gradient operations. // Gradient operations require all neighboring lanes to be active when // calculated; the compiler will enable lanes to meet this requirement. If a // wave operation contributes to a gradient operation, it can produce an // unexpected result because the active lanes have been modified. // To avoid unexpected results, validation will fail if gradient operations // are dependent on wave-sensitive data or control flow. class WaveSensitivityAnalyzer : public WaveSensitivityAnalysis { private: enum WaveSensitivity { KnownSensitive, KnownNotSensitive, Unknown }; PostDominatorTree *pPDT; map<Instruction *, WaveSensitivity> InstState; map<BasicBlock *, WaveSensitivity> BBState; std::vector<Instruction *> InstWorkList; std::vector<PHINode *> UnknownPhis; // Currently unknown phis; indicate cycles after Analyze. std::vector<BasicBlock *> BBWorkList; bool CheckBBState(BasicBlock *BB, WaveSensitivity WS); WaveSensitivity GetInstState(Instruction *I); void UpdateBlock(BasicBlock *BB, WaveSensitivity WS); void UpdateInst(Instruction *I, WaveSensitivity WS); void VisitInst(Instruction *I); public: WaveSensitivityAnalyzer(PostDominatorTree &PDT) : pPDT(&PDT) {} void Analyze(Function *F); void Analyze(); bool IsWaveSensitive(Instruction *op); }; WaveSensitivityAnalysis * WaveSensitivityAnalysis::create(PostDominatorTree &PDT) { return new WaveSensitivityAnalyzer(PDT); } // Analyze the given function's instructions as wave-sensitive or not. void WaveSensitivityAnalyzer::Analyze(Function *F) { // Add all blocks but the entry in reverse order so they come out in order. auto it = F->getBasicBlockList().end(); for (it--; it != F->getBasicBlockList().begin(); it--) BBWorkList.emplace_back(&*it); // Add entry block as non-sensitive. UpdateBlock(&*it, KnownNotSensitive); // First analysis. Analyze(); // If any phis with explored preds remain unknown, // they have to be in a loop that doesn't include wave sensitivity. // Update each as such and redo Analyze to mark the descendants. while (!UnknownPhis.empty() || !InstWorkList.empty() || !BBWorkList.empty()) { while (!UnknownPhis.empty()) { PHINode *Phi = UnknownPhis.back(); UnknownPhis.pop_back(); // UnknownPhis might have actually known phis that were changed. 
Skip them. if (Unknown == GetInstState(Phi)) { // If any of the preds have not been visited, we can't assume a cycle // yet. bool allPredsVisited = true; for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { if (!BBState.count(Phi->getIncomingBlock(i))) { allPredsVisited = false; break; } } #ifndef NDEBUG for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { if (Instruction *IArg = dyn_cast<Instruction>(Phi->getIncomingValue(i))) { DXASSERT_LOCALVAR(IArg, GetInstState(IArg) != KnownSensitive, "Unknown wave-status Phi argument should not be " "able to be known sensitive"); } } #endif if (allPredsVisited) UpdateInst(Phi, KnownNotSensitive); } } Analyze(); } #ifndef NDEBUG for (BasicBlock &BB : *F) { for (Instruction &I : BB) { DXASSERT_LOCALVAR(I, Unknown != GetInstState(&I), "Wave sensitivity analysis exited without finding " "results for all instructions"); } } #endif } // Analyze the member instruction and BBlock worklists. void WaveSensitivityAnalyzer::Analyze() { while (!InstWorkList.empty() || !BBWorkList.empty()) { // Process the instruction work list. while (!InstWorkList.empty()) { Instruction *I = InstWorkList.back(); InstWorkList.pop_back(); // "I" got into the work list because it made a transition. for (User *U : I->users()) { Instruction *UI = cast<Instruction>(U); VisitInst(UI); } } // Process one entry of the basic block work list. if (!BBWorkList.empty()) { BasicBlock *BB = BBWorkList.back(); BBWorkList.pop_back(); // Notify all instructions in this basic block that they need to // be reevaluated (e.g., a block previously thought to be insensitive // is now sensitive). for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) VisitInst(I); } } } bool WaveSensitivityAnalyzer::CheckBBState(BasicBlock *BB, WaveSensitivity WS) { auto c = BBState.find(BB); if (c == BBState.end()) { return WS == Unknown; } else { return (*c).second == WS; } } WaveSensitivityAnalyzer::WaveSensitivity WaveSensitivityAnalyzer::GetInstState(Instruction *I) { auto c = InstState.find(I); if (c == InstState.end()) return Unknown; return (*c).second; } void WaveSensitivityAnalyzer::UpdateBlock(BasicBlock *BB, WaveSensitivity WS) { auto c = BBState.find(BB); // Do not update if an entry is already found and it hasn't changed or // has already been marked as wave sensitive (an insensitive term might // try to mark it as such, but this effectively implements the 'any pred' // rule). if (c != BBState.end() && ((*c).second == WS || (*c).second == KnownSensitive)) return; BBState[BB] = WS; BBWorkList.push_back(BB); } void WaveSensitivityAnalyzer::UpdateInst(Instruction *I, WaveSensitivity WS) { auto c = InstState.find(I); if (c == InstState.end() || (*c).second != WS) { InstState[I] = WS; InstWorkList.push_back(I); if (TerminatorInst *TI = dyn_cast<TerminatorInst>(I)) { BasicBlock *CurBB = TI->getParent(); for (unsigned i = 0; i < TI->getNumSuccessors(); ++i) { BasicBlock *BB = TI->getSuccessor(i); // Only propagate WS when BB does not post-dominate CurBB. WaveSensitivity TmpWS = pPDT->properlyDominates(BB, CurBB) ? 
WaveSensitivity::KnownNotSensitive : WS; UpdateBlock(BB, TmpWS); } } } } void WaveSensitivityAnalyzer::VisitInst(Instruction *I) { unsigned firstArg = 0; if (CallInst *CI = dyn_cast<CallInst>(I)) { if (OP::IsDxilOpFuncCallInst(CI)) { firstArg = 1; OP::OpCode opcode = OP::GetDxilOpFuncCallInst(CI); if (OP::IsDxilOpWave(opcode)) { UpdateInst(I, KnownSensitive); return; } } } if (CheckBBState(I->getParent(), KnownSensitive)) { UpdateInst(I, KnownSensitive); return; } // Catch control-flow wave sensitivity for phis. if (PHINode *Phi = dyn_cast<PHINode>(I)) { for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { BasicBlock *BB = Phi->getIncomingBlock(i); WaveSensitivity WS = GetInstState(BB->getTerminator()); if (WS == KnownSensitive) { UpdateInst(I, KnownSensitive); return; } else if (Unknown == GetInstState(I)) { UnknownPhis.emplace_back(Phi); } } } bool allKnownNotSensitive = true; for (unsigned i = firstArg; i < I->getNumOperands(); ++i) { Value *V = I->getOperand(i); if (Instruction *IArg = dyn_cast<Instruction>(V)) { WaveSensitivity WS = GetInstState(IArg); if (WS == KnownSensitive) { UpdateInst(I, KnownSensitive); return; } else if (WS == Unknown) { allKnownNotSensitive = false; } } } if (allKnownNotSensitive) { UpdateInst(I, KnownNotSensitive); } } bool WaveSensitivityAnalyzer::IsWaveSensitive(Instruction *op) { auto c = InstState.find(op); if (c == InstState.end()) { DXASSERT(false, "Instruction sensitivity not found. Analysis didn't complete!"); return false; } DXASSERT((*c).second != Unknown, "else analysis is missing a case"); return (*c).second == KnownSensitive; } } // namespace hlsl
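// Illustrative HLSL (hypothetical names, not part of this file) of the
// pattern this analysis is meant to flag:
//   float u = WaveActiveSum(input.a);            // wave-sensitive value
//   float c = tex.Sample(samp, float2(u, 0)).r;  // gradient op depends on u
// Sample takes implicit derivatives of its coordinate across neighboring
// lanes, so a wave-sensitive coordinate makes the result depend on exactly
// which lanes happen to be active.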
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilTargetTransformInfo.h
//===-- DxilTargetTransformInfo.h - DXIL specific TTI -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This file declares a TargetTransformInfo analysis pass specific to DXIL. /// Only isSourceOfDivergence is implemented, for use by DivergenceAnalysis. /// //===----------------------------------------------------------------------===// #pragma once #include "llvm/CodeGen/BasicTTIImpl.h" namespace hlsl { class DxilModule; class OP; } // namespace hlsl namespace llvm { class DxilTTIImpl final : public BasicTTIImplBase<DxilTTIImpl> { typedef BasicTTIImplBase<DxilTTIImpl> BaseT; typedef TargetTransformInfo TTI; friend BaseT; hlsl::OP *m_pHlslOP; bool m_isThreadGroup; const TargetSubtargetInfo *getST() const { return nullptr; } const TargetLowering *getTLI() const { return nullptr; } public: explicit DxilTTIImpl(const TargetMachine *TM, const Function &F, hlsl::DxilModule &DM, bool ThreadGroup); bool hasBranchDivergence() { return true; } bool isSourceOfDivergence(const Value *V) const; }; } // end namespace llvm
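// Usage sketch (illustrative, not part of this header): a caller wraps the
// impl in a TargetTransformInfo and queries divergence through it, e.g.
//   DxilTTIImpl Impl(/*TM*/ nullptr, F, DM, /*ThreadGroup*/ false);
//   TargetTransformInfo TTI(std::move(Impl));
//   bool Divergent = TTI.isSourceOfDivergence(V);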
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMatrixSubscriptUseReplacer.h
/////////////////////////////////////////////////////////////////////////////// // // // HLMatrixSubscriptUseReplacer.h // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #pragma once #include "llvm/ADT/SmallVector.h" #include "llvm/IR/IRBuilder.h" #include <vector> namespace llvm { class Value; class AllocaInst; class CallInst; class Instruction; class Function; } // namespace llvm namespace hlsl { // Implements recursive replacement of a matrix subscript's uses, // from a pointer to a matrix value to a pointer to its lowered vector version, // whether directly or through GEPs in the case of two-level indexing like // mat[i][j]. This has to handle one or two levels of indices, each of which is // either constant or dynamic: mat[0], mat[i], mat[0][0], mat[i][0], mat[0][j], // mat[i][j], plus the equivalent element accesses: mat._11, mat._11_12, // mat._11_12[0], mat._11_12[i]. class HLMatrixSubscriptUseReplacer { public: // The constructor performs the entire replacement. HLMatrixSubscriptUseReplacer( llvm::CallInst *Call, llvm::Value *LoweredPtr, llvm::Value *TempLoweredMatrix, llvm::SmallVectorImpl<llvm::Value *> &ElemIndices, bool AllowLoweredPtrGEPs, std::vector<llvm::Instruction *> &DeadInsts); private: void replaceUses(llvm::Instruction *PtrInst, llvm::Value *SubIdxVal); llvm::Value *tryGetScalarIndex(llvm::Value *SubIdxVal, llvm::IRBuilder<> &Builder); void cacheLoweredMatrix(bool ForDynamicIndexing, llvm::IRBuilder<> &Builder); llvm::Value *loadElem(llvm::Value *Idx, llvm::IRBuilder<> &Builder); void storeElem(llvm::Value *Idx, llvm::Value *Elem, llvm::IRBuilder<> &Builder); llvm::Value *loadVector(llvm::IRBuilder<> &Builder); void storeVector(llvm::Value *Vec, llvm::IRBuilder<> &Builder); void flushLoweredMatrix(llvm::IRBuilder<> &Builder); private: llvm::Value *LoweredPtr; llvm::SmallVectorImpl<llvm::Value *> &ElemIndices; std::vector<llvm::Instruction *> &DeadInsts; bool AllowLoweredPtrGEPs = false; bool HasScalarResult = false; bool HasDynamicElemIndex = false; llvm::Type *LoweredTy = nullptr; // The entire lowered matrix as loaded from LoweredPtr, // nullptr if we copied it to a temporary array. llvm::Value *TempLoweredMatrix = nullptr; // We allocate this if the level 1 indices are not all constants, // so we can dynamically index the lowered matrix vector. llvm::AllocaInst *LazyTempElemArrayAlloca = nullptr; // We'll allocate this lazily if we have a dynamic level 2 index (mat[0][i]), // so we can dynamically index the level 1 indices. llvm::AllocaInst *LazyTempElemIndicesArrayAlloca = nullptr; }; } // namespace hlsl
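// Illustrative example (assuming a row-major float4x4 and hypothetical
// variable names): for
//   float f = mat[2][3];
// the matrix pointer is replaced by a pointer to a 16-element vector, and the
// constant element index folds to 2*4+3 = 11. When a level of indexing is
// dynamic, the lazily created temporary allocas above are used to select the
// element at runtime instead.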
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilLoopDeletion.cpp
//===- DxilLoopDeletion.cpp - Dead Loop Deletion Pass -----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file runs LoopDeletion, SimplifyCFG, and DCE more than once to make // sure all unused loops can be removed. kMaxIteration bounds the iteration // count so the pass itself cannot loop forever. // //===----------------------------------------------------------------------===// #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/IR/Function.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" using namespace llvm; namespace { class DxilLoopDeletion : public FunctionPass { public: bool m_HLSLNoSink = false; static char ID; // Pass ID, replacement for typeid DxilLoopDeletion(bool NoSink = false) : FunctionPass(ID), m_HLSLNoSink(NoSink) {} bool runOnFunction(Function &F) override; void applyOptions(PassOptions O) override { GetPassOptionBool(O, "NoSink", &m_HLSLNoSink, /*defaultValue*/ false); } void dumpConfig(raw_ostream &OS) override { FunctionPass::dumpConfig(OS); OS << ",NoSink=" << m_HLSLNoSink; } }; } // namespace char DxilLoopDeletion::ID = 0; INITIALIZE_PASS(DxilLoopDeletion, "dxil-loop-deletion", "Dxil Delete dead loops", false, false) FunctionPass *llvm::createDxilLoopDeletionPass(bool NoSink) { return new DxilLoopDeletion(NoSink); } bool DxilLoopDeletion::runOnFunction(Function &F) { // Run loop deletion in its own pass manager (loop simplify runs first to // move loop-invariant code) so the run only reports updating the function // when a loop is actually deleted. legacy::FunctionPassManager DeleteLoopPM(F.getParent()); DeleteLoopPM.add(createLoopDeletionPass()); bool bUpdated = false; legacy::FunctionPassManager SimplifyPM(F.getParent()); SimplifyPM.add(createCFGSimplificationPass()); SimplifyPM.add(createDeadCodeEliminationPass()); SimplifyPM.add(createInstructionCombiningPass(/*HLSL No sink*/ m_HLSLNoSink)); const unsigned kMaxIteration = 3; unsigned i = 0; while (i < kMaxIteration) { if (!DeleteLoopPM.run(F)) break; SimplifyPM.run(F); i++; bUpdated = true; } return bUpdated; }
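// Usage sketch (illustrative): the pass is scheduled like any other function
// pass, e.g.
//   legacy::FunctionPassManager FPM(F.getParent());
//   FPM.add(llvm::createDxilLoopDeletionPass(/*NoSink*/ false));
//   FPM.run(F);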
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLOperationLower.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLOperationLower.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Lower functions to lower HL operations to DXIL operations. // // // /////////////////////////////////////////////////////////////////////////////// #define _USE_MATH_DEFINES #include <array> #include <cmath> #include <functional> #include <unordered_set> #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilResourceProperties.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilPoisonValues.h" #include "dxc/HLSL/HLLowerUDT.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperationLower.h" #include "dxc/HLSL/HLOperationLowerExtension.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/ADT/APSInt.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" using namespace llvm; using namespace hlsl; struct HLOperationLowerHelper { HLModule &M; OP &hlslOP; Type *voidTy; Type *f32Ty; Type *i32Ty; Type *i16Ty; llvm::Type *i1Ty; Type *i8Ty; DxilTypeSystem &dxilTypeSys; DxilFunctionProps *functionProps; DataLayout dataLayout; SmallDenseMap<Type *, Type *, 4> loweredTypes; HLOperationLowerHelper(HLModule &HLM); }; HLOperationLowerHelper::HLOperationLowerHelper(HLModule &HLM) : M(HLM), hlslOP(*HLM.GetOP()), dxilTypeSys(HLM.GetTypeSystem()), dataLayout(DataLayout(HLM.GetHLOptions().bUseMinPrecision ? hlsl::DXIL::kLegacyLayoutString : hlsl::DXIL::kNewLayoutString)) { llvm::LLVMContext &Ctx = HLM.GetCtx(); voidTy = Type::getVoidTy(Ctx); f32Ty = Type::getFloatTy(Ctx); i32Ty = Type::getInt32Ty(Ctx); i16Ty = Type::getInt16Ty(Ctx); i1Ty = Type::getInt1Ty(Ctx); i8Ty = Type::getInt8Ty(Ctx); Function *EntryFunc = HLM.GetEntryFunction(); functionProps = nullptr; if (HLM.HasDxilFunctionProps(EntryFunc)) functionProps = &HLM.GetDxilFunctionProps(EntryFunc); } struct HLObjectOperationLowerHelper { private: // For object intrinsics. HLModule &HLM; struct ResAttribute { DXIL::ResourceClass RC; DXIL::ResourceKind RK; Type *ResourceType; }; std::unordered_map<Value *, ResAttribute> HandleMetaMap; std::unordered_set<Instruction *> &UpdateCounterSet; // Map from pointer of cbuffer to pointer of resource. // For cbuffer like this: // cbuffer A { // Texture2D T; // }; // A global resource Texture2D T2 will be created for Texture2D T. // CBPtrToResourceMap[T] will return T2. 
std::unordered_map<Value *, Value *> CBPtrToResourceMap; public: HLObjectOperationLowerHelper(HLModule &HLM, std::unordered_set<Instruction *> &UpdateCounter) : HLM(HLM), UpdateCounterSet(UpdateCounter) {} DXIL::ResourceClass GetRC(Value *Handle) { ResAttribute &Res = FindCreateHandleResourceBase(Handle); return Res.RC; } DXIL::ResourceKind GetRK(Value *Handle) { ResAttribute &Res = FindCreateHandleResourceBase(Handle); return Res.RK; } Type *GetResourceType(Value *Handle) { ResAttribute &Res = FindCreateHandleResourceBase(Handle); return Res.ResourceType; } void MarkHasCounter(Value *handle, Type *i8Ty) { CallInst *CIHandle = cast<CallInst>(handle); DXASSERT(hlsl::GetHLOpcodeGroup(CIHandle->getCalledFunction()) == HLOpcodeGroup::HLAnnotateHandle, "else invalid handle"); // Mark the input handle as having a counter. Value *counterHandle = CIHandle->getArgOperand(HLOperandIndex::kHandleOpIdx); // Change kind into StructuredBufferWithCounter. Constant *Props = cast<Constant>(CIHandle->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx)); DxilResourceProperties RP = resource_helper::loadPropsFromConstant(*Props); RP.Basic.SamplerCmpOrHasCounter = true; CIHandle->setArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx, resource_helper::getAsConstant(RP, HLM.GetOP()->GetResourcePropertiesType(), *HLM.GetShaderModel())); DXIL::ResourceClass RC = GetRC(handle); DXASSERT_LOCALVAR(RC, RC == DXIL::ResourceClass::UAV, "must be UAV for counter"); std::unordered_set<Value *> resSet; MarkHasCounterOnCreateHandle(counterHandle, resSet); } DxilResourceBase *FindCBufferResourceFromHandle(Value *handle) { if (CallInst *CI = dyn_cast<CallInst>(handle)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::HLAnnotateHandle) { handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); } } Constant *symbol = nullptr; if (CallInst *CI = dyn_cast<CallInst>(handle)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::HLCreateHandle) { symbol = dyn_cast<Constant>( CI->getArgOperand(HLOperandIndex::kCreateHandleResourceOpIdx)); } } if (!symbol) return nullptr; for (const std::unique_ptr<DxilCBuffer> &res : HLM.GetCBuffers()) { if (res->GetGlobalSymbol() == symbol) return res.get(); } return nullptr; } Value *GetOrCreateResourceForCbPtr(GetElementPtrInst *CbPtr, GlobalVariable *CbGV, DxilResourceProperties &RP) { // Change array idx to 0 to make sure all array ptrs share the same key. Value *Key = UniformCbPtr(CbPtr, CbGV); if (CBPtrToResourceMap.count(Key)) return CBPtrToResourceMap[Key]; Value *Resource = CreateResourceForCbPtr(CbPtr, CbGV, RP); CBPtrToResourceMap[Key] = Resource; return Resource; } Value *LowerCbResourcePtr(GetElementPtrInst *CbPtr, Value *ResPtr) { // Simple case. if (ResPtr->getType() == CbPtr->getType()) return ResPtr; // Array case. DXASSERT_NOMSG(ResPtr->getType()->getPointerElementType()->isArrayTy()); IRBuilder<> Builder(CbPtr); gep_type_iterator GEPIt = gep_type_begin(CbPtr), E = gep_type_end(CbPtr); Value *arrayIdx = GEPIt.getOperand(); // Only calc array idx and size. // Ignore struct type part. 
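// E.g. (illustrative) for a resource array T[2][3] declared in the cbuffer
// and accessed as T[i][j], the loop below linearizes the GEP indices to
// arrayIdx = (i * 3) + j before the final GEP into the flattened array.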
for (; GEPIt != E; ++GEPIt) { if (GEPIt->isArrayTy()) { arrayIdx = Builder.CreateMul( arrayIdx, Builder.getInt32(GEPIt->getArrayNumElements())); arrayIdx = Builder.CreateAdd(arrayIdx, GEPIt.getOperand()); } } return Builder.CreateGEP(ResPtr, {Builder.getInt32(0), arrayIdx}); } DxilResourceProperties GetResPropsFromAnnotateHandle(CallInst *Anno) { Constant *Props = cast<Constant>(Anno->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx)); DxilResourceProperties RP = resource_helper::loadPropsFromConstant(*Props); return RP; } private: ResAttribute &FindCreateHandleResourceBase(Value *Handle) { if (HandleMetaMap.count(Handle)) return HandleMetaMap[Handle]; // Add invalid first to avoid dead loop. HandleMetaMap[Handle] = { DXIL::ResourceClass::Invalid, DXIL::ResourceKind::Invalid, StructType::get(Type::getVoidTy(HLM.GetCtx()), nullptr)}; if (CallInst *CI = dyn_cast<CallInst>(Handle)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::HLAnnotateHandle) { Constant *Props = cast<Constant>(CI->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx)); DxilResourceProperties RP = resource_helper::loadPropsFromConstant(*Props); Type *ResTy = CI->getArgOperand(HLOperandIndex::kAnnotateHandleResourceTypeOpIdx) ->getType(); ResAttribute Attrib = {RP.getResourceClass(), RP.getResourceKind(), ResTy}; HandleMetaMap[Handle] = Attrib; return HandleMetaMap[Handle]; } } dxilutil::EmitErrorOnContext(Handle->getContext(), "cannot map resource to handle."); return HandleMetaMap[Handle]; } CallInst *FindCreateHandle(Value *handle, std::unordered_set<Value *> &resSet) { // Already checked. if (resSet.count(handle)) return nullptr; resSet.insert(handle); if (CallInst *CI = dyn_cast<CallInst>(handle)) return CI; if (SelectInst *Sel = dyn_cast<SelectInst>(handle)) { if (CallInst *CI = FindCreateHandle(Sel->getTrueValue(), resSet)) return CI; if (CallInst *CI = FindCreateHandle(Sel->getFalseValue(), resSet)) return CI; return nullptr; } if (PHINode *Phi = dyn_cast<PHINode>(handle)) { for (unsigned i = 0; i < Phi->getNumOperands(); i++) { if (CallInst *CI = FindCreateHandle(Phi->getOperand(i), resSet)) return CI; } return nullptr; } return nullptr; } void MarkHasCounterOnCreateHandle(Value *handle, std::unordered_set<Value *> &resSet) { // Already checked. 
if (resSet.count(handle)) return; resSet.insert(handle); if (CallInst *CI = dyn_cast<CallInst>(handle)) { Value *Res = CI->getArgOperand(HLOperandIndex::kCreateHandleResourceOpIdx); LoadInst *LdRes = dyn_cast<LoadInst>(Res); if (LdRes) { UpdateCounterSet.insert(LdRes); return; } if (CallInst *CallRes = dyn_cast<CallInst>(Res)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(CallRes->getCalledFunction()); if (group == HLOpcodeGroup::HLCast) { HLCastOpcode opcode = static_cast<HLCastOpcode>(hlsl::GetHLOpcode(CallRes)); if (opcode == HLCastOpcode::HandleToResCast) { if (Instruction *Hdl = dyn_cast<Instruction>( CallRes->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx))) UpdateCounterSet.insert(Hdl); return; } } } dxilutil::EmitErrorOnInstruction(CI, "cannot map resource to handle."); return; } if (SelectInst *Sel = dyn_cast<SelectInst>(handle)) { MarkHasCounterOnCreateHandle(Sel->getTrueValue(), resSet); MarkHasCounterOnCreateHandle(Sel->getFalseValue(), resSet); } if (PHINode *Phi = dyn_cast<PHINode>(handle)) { for (unsigned i = 0; i < Phi->getNumOperands(); i++) { MarkHasCounterOnCreateHandle(Phi->getOperand(i), resSet); } } } Value *UniformCbPtr(GetElementPtrInst *CbPtr, GlobalVariable *CbGV) { gep_type_iterator GEPIt = gep_type_begin(CbPtr), E = gep_type_end(CbPtr); std::vector<Value *> idxList(CbPtr->idx_begin(), CbPtr->idx_end()); unsigned i = 0; IRBuilder<> Builder(HLM.GetCtx()); Value *zero = Builder.getInt32(0); for (; GEPIt != E; ++GEPIt, ++i) { ConstantInt *ImmIdx = dyn_cast<ConstantInt>(GEPIt.getOperand()); if (!ImmIdx) { // Remove dynamic indexing to avoid crash. idxList[i] = zero; } } Value *Key = Builder.CreateInBoundsGEP(CbGV, idxList); return Key; } Value *CreateResourceForCbPtr(GetElementPtrInst *CbPtr, GlobalVariable *CbGV, DxilResourceProperties &RP) { Type *CbTy = CbPtr->getPointerOperandType(); DXASSERT_LOCALVAR(CbTy, CbTy == CbGV->getType(), "else arg not point to var"); gep_type_iterator GEPIt = gep_type_begin(CbPtr), E = gep_type_end(CbPtr); unsigned i = 0; IRBuilder<> Builder(HLM.GetCtx()); unsigned arraySize = 1; DxilTypeSystem &typeSys = HLM.GetTypeSystem(); std::string Name; for (; GEPIt != E; ++GEPIt, ++i) { if (GEPIt->isArrayTy()) { arraySize *= GEPIt->getArrayNumElements(); if (!Name.empty()) Name += "."; if (ConstantInt *ImmIdx = dyn_cast<ConstantInt>(GEPIt.getOperand())) { unsigned idx = ImmIdx->getLimitedValue(); Name += std::to_string(idx); } } else if (GEPIt->isStructTy()) { DxilStructAnnotation *typeAnnot = typeSys.GetStructAnnotation(cast<StructType>(*GEPIt)); DXASSERT_NOMSG(typeAnnot); unsigned idx = cast<ConstantInt>(GEPIt.getOperand())->getLimitedValue(); DXASSERT_NOMSG(typeAnnot->GetNumFields() > idx); DxilFieldAnnotation &fieldAnnot = typeAnnot->GetFieldAnnotation(idx); if (!Name.empty()) Name += "."; Name += fieldAnnot.GetFieldName(); } } Type *Ty = CbPtr->getResultElementType(); // Not support resource array in cbuffer. unsigned ResBinding = HLM.GetBindingForResourceInCB(CbPtr, CbGV, RP.getResourceClass()); return CreateResourceGV(Ty, Name, RP, ResBinding); } Value *CreateResourceGV(Type *Ty, StringRef Name, DxilResourceProperties &RP, unsigned ResBinding) { Module &M = *HLM.GetModule(); Constant *GV = M.getOrInsertGlobal(Name, Ty); // Create resource and set GV as globalSym. DxilResourceBase *Res = HLM.AddResourceWithGlobalVariableAndProps(GV, RP); DXASSERT(Res, "fail to create resource for global variable in cbuffer"); Res->SetLowerBound(ResBinding); return GV; } }; // Helper for lowering resource extension methods. 
struct HLObjectExtensionLowerHelper : public hlsl::HLResourceLookup { explicit HLObjectExtensionLowerHelper(HLObjectOperationLowerHelper &ObjHelper) : m_ObjHelper(ObjHelper) {} virtual bool GetResourceKindName(Value *HLHandle, const char **ppName) { DXIL::ResourceKind K = m_ObjHelper.GetRK(HLHandle); bool Success = K != DXIL::ResourceKind::Invalid; if (Success) { *ppName = hlsl::GetResourceKindName(K); } return Success; } private: HLObjectOperationLowerHelper &m_ObjHelper; }; using IntrinsicLowerFuncTy = Value *(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated); struct IntrinsicLower { // Intrinsic opcode. IntrinsicOp IntriOpcode; // Lower function. IntrinsicLowerFuncTy &LowerFunc; // DXIL opcode if can direct map. DXIL::OpCode DxilOpcode; }; // IOP intrinsics. namespace { Value *TrivialDxilOperation(Function *dxilFunc, OP::OpCode opcode, ArrayRef<Value *> refArgs, Type *Ty, Type *RetTy, OP *hlslOP, IRBuilder<> &Builder) { unsigned argNum = refArgs.size(); std::vector<Value *> args = refArgs; if (Ty->isVectorTy()) { Value *retVal = llvm::UndefValue::get(RetTy); unsigned vecSize = Ty->getVectorNumElements(); for (unsigned i = 0; i < vecSize; i++) { // Update vector args, skip known opcode arg. for (unsigned argIdx = HLOperandIndex::kUnaryOpSrc0Idx; argIdx < argNum; argIdx++) { if (refArgs[argIdx]->getType()->isVectorTy()) { Value *arg = refArgs[argIdx]; args[argIdx] = Builder.CreateExtractElement(arg, i); } } Value *EltOP = Builder.CreateCall(dxilFunc, args, hlslOP->GetOpCodeName(opcode)); retVal = Builder.CreateInsertElement(retVal, EltOP, i); } return retVal; } else { if (!RetTy->isVoidTy()) { Value *retVal = Builder.CreateCall(dxilFunc, args, hlslOP->GetOpCodeName(opcode)); return retVal; } else { // Cannot add name to void. return Builder.CreateCall(dxilFunc, args); } } } // Generates a DXIL operation over an overloaded type (Ty), returning a // RetTy value; when Ty is a vector, it will replicate per-element operations // into RetTy to rebuild it. 
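// E.g. (illustrative DXIL) Sin over a <3 x float> expands to three scalar
// calls of the form
//   %e = call float @dx.op.unary.f32(i32 13, float %elt)
// whose results are reassembled into the result vector with insertelement.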
Value *TrivialDxilOperation(OP::OpCode opcode, ArrayRef<Value *> refArgs, Type *Ty, Type *RetTy, OP *hlslOP, IRBuilder<> &Builder) { Type *EltTy = Ty->getScalarType(); Function *dxilFunc = hlslOP->GetOpFunc(opcode, EltTy); return TrivialDxilOperation(dxilFunc, opcode, refArgs, Ty, RetTy, hlslOP, Builder); } Value *TrivialDxilOperation(OP::OpCode opcode, ArrayRef<Value *> refArgs, Type *Ty, Instruction *Inst, OP *hlslOP) { DXASSERT(refArgs.size() > 0, "else opcode isn't in signature"); DXASSERT(refArgs[0] == nullptr, "else caller has already filled the value in"); IRBuilder<> B(Inst); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); const_cast<llvm::Value **>(refArgs.data())[0] = opArg; // actually stack memory from caller return TrivialDxilOperation(opcode, refArgs, Ty, Inst->getType(), hlslOP, B); } Value *TrivialDxilUnaryOperationRet(OP::OpCode opcode, Value *src, Type *RetTy, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *Ty = src->getType(); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg, src}; return TrivialDxilOperation(opcode, args, Ty, RetTy, hlslOP, Builder); } Value *TrivialDxilUnaryOperation(OP::OpCode opcode, Value *src, hlsl::OP *hlslOP, IRBuilder<> &Builder) { return TrivialDxilUnaryOperationRet(opcode, src, src->getType(), hlslOP, Builder); } Value *TrivialDxilBinaryOperation(OP::OpCode opcode, Value *src0, Value *src1, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *Ty = src0->getType(); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg, src0, src1}; return TrivialDxilOperation(opcode, args, Ty, Ty, hlslOP, Builder); } Value *TrivialDxilTrinaryOperation(OP::OpCode opcode, Value *src0, Value *src1, Value *src2, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *Ty = src0->getType(); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg, src0, src1, src2}; return TrivialDxilOperation(opcode, args, Ty, Ty, hlslOP, Builder); } Value *TrivialUnaryOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *src0 = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); hlsl::OP *hlslOP = &helper.hlslOP; Value *retVal = TrivialDxilUnaryOperationRet(opcode, src0, CI->getType(), hlslOP, Builder); return retVal; } Value *TrivialBinaryOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *binOp = TrivialDxilBinaryOperation(opcode, src0, src1, hlslOP, Builder); return binOp; } Value *TrivialTrinaryOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *src2 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); IRBuilder<> Builder(CI); Value *triOp = TrivialDxilTrinaryOperation(opcode, src0, src1, src2, hlslOP, Builder); return triOp; } Value *TrivialIsSpecialFloat(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { 
hlsl::OP *hlslOP = &helper.hlslOP; Value *src = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); Type *Ty = src->getType(); Type *RetTy = Type::getInt1Ty(CI->getContext()); if (Ty->isVectorTy()) RetTy = VectorType::get(RetTy, Ty->getVectorNumElements()); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg, src}; return TrivialDxilOperation(opcode, args, Ty, RetTy, hlslOP, Builder); } bool IsResourceGEP(GetElementPtrInst *I) { Type *Ty = I->getType()->getPointerElementType(); Ty = dxilutil::GetArrayEltTy(Ty); // Only mark on GEP which point to resource. return dxilutil::IsHLSLResourceType(Ty); } Value *TranslateNonUniformResourceIndex( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *V = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *hdlTy = helper.hlslOP.GetHandleType(); for (User *U : CI->users()) { if (GetElementPtrInst *I = dyn_cast<GetElementPtrInst>(U)) { // Only mark on GEP which point to resource. if (IsResourceGEP(I)) DxilMDHelper::MarkNonUniform(I); } else if (CastInst *castI = dyn_cast<CastInst>(U)) { for (User *castU : castI->users()) { if (GetElementPtrInst *I = dyn_cast<GetElementPtrInst>(castU)) { // Only mark on GEP which point to resource. if (IsResourceGEP(I)) DxilMDHelper::MarkNonUniform(I); } else if (CallInst *CI = dyn_cast<CallInst>(castU)) { if (CI->getType() == hdlTy) DxilMDHelper::MarkNonUniform(CI); } } } else if (CallInst *CI = dyn_cast<CallInst>(U)) { if (CI->getType() == hdlTy) DxilMDHelper::MarkNonUniform(CI); } } CI->replaceAllUsesWith(V); return nullptr; } Value *TrivialBarrier(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Function *dxilFunc = OP->GetOpFunc(OP::OpCode::Barrier, CI->getType()); Constant *opArg = OP->GetU32Const((unsigned)OP::OpCode::Barrier); unsigned uglobal = static_cast<unsigned>(DXIL::BarrierMode::UAVFenceGlobal); unsigned g = static_cast<unsigned>(DXIL::BarrierMode::TGSMFence); unsigned t = static_cast<unsigned>(DXIL::BarrierMode::SyncThreadGroup); // unsigned ut = // static_cast<unsigned>(DXIL::BarrierMode::UAVFenceThreadGroup); unsigned barrierMode = 0; switch (IOP) { case IntrinsicOp::IOP_AllMemoryBarrier: barrierMode = uglobal | g; break; case IntrinsicOp::IOP_AllMemoryBarrierWithGroupSync: barrierMode = uglobal | g | t; break; case IntrinsicOp::IOP_GroupMemoryBarrier: barrierMode = g; break; case IntrinsicOp::IOP_GroupMemoryBarrierWithGroupSync: barrierMode = g | t; break; case IntrinsicOp::IOP_DeviceMemoryBarrier: barrierMode = uglobal; break; case IntrinsicOp::IOP_DeviceMemoryBarrierWithGroupSync: barrierMode = uglobal | t; break; default: DXASSERT(0, "invalid opcode for barrier"); break; } Value *src0 = OP->GetU32Const(static_cast<unsigned>(barrierMode)); Value *args[] = {opArg, src0}; IRBuilder<> Builder(CI); Builder.CreateCall(dxilFunc, args); return nullptr; } Value *TranslateD3DColorToUByte4(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { IRBuilder<> Builder(CI); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = val->getType(); // Use the same scaling factor used by FXC (i.e., 255.001953) // Excerpt from stackoverflow discussion: // "Built-in rounding, necessary because of truncation. 
0.001953 * 256 = 0.5" Constant *toByteConst = ConstantFP::get(Ty->getScalarType(), 255.001953); if (Ty->isVectorTy()) { static constexpr int supportedVecElemCount = 4; if (Ty->getVectorNumElements() == supportedVecElemCount) { toByteConst = ConstantVector::getSplat(supportedVecElemCount, toByteConst); // Swizzle the input val -> val.zyxw std::vector<int> mask{2, 1, 0, 3}; val = Builder.CreateShuffleVector(val, val, mask); } else { llvm_unreachable( "Unsupported input type for intrinsic D3DColorToUByte4."); return UndefValue::get(CI->getType()); } } Value *byte4 = Builder.CreateFMul(toByteConst, val); return Builder.CreateCast(Instruction::CastOps::FPToSI, byte4, CI->getType()); } // Returns true if pow can be implemented using Fxc's mul-only code gen pattern. // Fxc uses the below rules when choosing mul-only code gen pattern to implement // pow function. Rule 1: Applicable only to power values in the range // [INT32_MIN, INT32_MAX] Rule 2: The maximum number of mul ops needed shouldn't // exceed (2n+1) or (n+1) based on whether the power // is a positive or a negative value. Here "n" is the number of scalar // elements in power. // Rule 3: Power must be an exact value. // +----------+---------------------+------------------+ // | BaseType | IsExponentPositive | MaxMulOpsAllowed | // +----------+---------------------+------------------+ // | float4x4 | True | 33 | // | float4x4 | False | 17 | // | float4x2 | True | 17 | // | float4x2 | False | 9 | // | float2x4 | True | 17 | // | float2x4 | False | 9 | // | float4 | True | 9 | // | float4 | False | 5 | // | float2 | True | 5 | // | float2 | False | 3 | // | float | True | 3 | // | float | False | 2 | // +----------+---------------------+------------------+ bool CanUseFxcMulOnlyPatternForPow(IRBuilder<> &Builder, Value *x, Value *pow, int32_t &powI) { // Applicable only when power is a literal. if (!isa<ConstantDataVector>(pow) && !isa<ConstantFP>(pow)) { return false; } // Only apply this code gen on splat values. if (ConstantDataVector *cdv = dyn_cast<ConstantDataVector>(pow)) { if (!hlsl::dxilutil::IsSplat(cdv)) { return false; } } APFloat powAPF = isa<ConstantDataVector>(pow) ? cast<ConstantDataVector>(pow)->getElementAsAPFloat(0) : // should be a splat value cast<ConstantFP>(pow)->getValueAPF(); APSInt powAPS(32, false); bool isExact = false; // Try converting float value of power to integer and also check if the float // value is exact. APFloat::opStatus status = powAPF.convertToInteger(powAPS, APFloat::rmTowardZero, &isExact); if (status == APFloat::opStatus::opOK && isExact) { powI = powAPS.getExtValue(); uint32_t powU = abs(powI); int setBitCount = 0; int maxBitSetPos = -1; for (int i = 0; i < 32; i++) { if ((powU >> i) & 1) { setBitCount++; maxBitSetPos = i; } } DXASSERT(maxBitSetPos <= 30, "msb should always be zero."); unsigned numElem = isa<ConstantDataVector>(pow) ? x->getType()->getVectorNumElements() : 1; int mulOpThreshold = powI < 0 ? numElem + 1 : 2 * numElem + 1; int mulOpNeeded = maxBitSetPos + setBitCount - 1; return mulOpNeeded <= mulOpThreshold; } return false; } Value *TranslatePowUsingFxcMulOnlyPattern(IRBuilder<> &Builder, Value *x, const int32_t y) { uint32_t absY = abs(y); // If y is zero then always return 1. 
if (absY == 0) { return ConstantFP::get(x->getType(), 1); } int lastSetPos = -1; Value *result = nullptr; Value *mul = nullptr; for (int i = 0; i < 32; i++) { if ((absY >> i) & 1) { for (int j = i; j > lastSetPos; j--) { if (!mul) { mul = x; } else { mul = Builder.CreateFMul(mul, mul); } } result = (result == nullptr) ? mul : Builder.CreateFMul(result, mul); lastSetPos = i; } } // Compute reciprocal for negative power values. if (y < 0) { Value *constOne = ConstantFP::get(x->getType(), 1); result = Builder.CreateFDiv(constOne, result); } return result; } Value *TranslatePowImpl(hlsl::OP *hlslOP, IRBuilder<> &Builder, Value *x, Value *y, bool isFXCCompatMode = false) { // As applicable implement pow using only mul ops as done by Fxc. int32_t p = 0; if (CanUseFxcMulOnlyPatternForPow(Builder, x, y, p)) { if (isFXCCompatMode) { return TranslatePowUsingFxcMulOnlyPattern(Builder, x, p); } else if (p == 2) { // Only take care 2 for it will not affect register pressure. return Builder.CreateFMul(x, x); } } // Default to log-mul-exp pattern if previous scenarios don't apply. // t = log(x); Value *logX = TrivialDxilUnaryOperation(DXIL::OpCode::Log, x, hlslOP, Builder); // t = y * t; Value *mulY = Builder.CreateFMul(logX, y); // pow = exp(t); return TrivialDxilUnaryOperation(DXIL::OpCode::Exp, mulY, hlslOP, Builder); } Value *TranslateAddUint64(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; IRBuilder<> Builder(CI); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = val->getType(); VectorType *VT = dyn_cast<VectorType>(Ty); if (!VT) { dxilutil::EmitErrorOnInstruction( CI, "AddUint64 can only be applied to uint2 and uint4 operands."); return UndefValue::get(Ty); } unsigned size = VT->getNumElements(); if (size != 2 && size != 4) { dxilutil::EmitErrorOnInstruction( CI, "AddUint64 can only be applied to uint2 and uint4 operands."); return UndefValue::get(Ty); } Value *op0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *op1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Value *RetVal = UndefValue::get(Ty); Function *AddC = hlslOP->GetOpFunc(DXIL::OpCode::UAddc, helper.i32Ty); Value *opArg = Builder.getInt32(static_cast<unsigned>(DXIL::OpCode::UAddc)); for (unsigned i = 0; i < size; i += 2) { Value *low0 = Builder.CreateExtractElement(op0, i); Value *low1 = Builder.CreateExtractElement(op1, i); Value *lowWithC = Builder.CreateCall(AddC, {opArg, low0, low1}); Value *low = Builder.CreateExtractValue(lowWithC, 0); RetVal = Builder.CreateInsertElement(RetVal, low, i); Value *carry = Builder.CreateExtractValue(lowWithC, 1); // Ext i1 to i32 carry = Builder.CreateZExt(carry, helper.i32Ty); Value *hi0 = Builder.CreateExtractElement(op0, i + 1); Value *hi1 = Builder.CreateExtractElement(op1, i + 1); Value *hi = Builder.CreateAdd(hi0, hi1); hi = Builder.CreateAdd(hi, carry); RetVal = Builder.CreateInsertElement(RetVal, hi, i + 1); } return RetVal; } bool IsValidLoadInput(Value *V) { // Must be load input. // TODO: report this error on front-end if (!V || !isa<CallInst>(V)) { return false; } CallInst *CI = cast<CallInst>(V); // Must be immediate. 
ConstantInt *opArg = cast<ConstantInt>(CI->getArgOperand(DXIL::OperandIndex::kOpcodeIdx)); DXIL::OpCode op = static_cast<DXIL::OpCode>(opArg->getLimitedValue()); if (op != DXIL::OpCode::LoadInput) { return false; } return true; } // Tunnel through insert/extract element and shuffle to find original source // of scalar value, or specified element (vecIdx) of vector value. Value *FindScalarSource(Value *src, unsigned vecIdx = 0) { Type *srcTy = src->getType()->getScalarType(); while (src && !isa<UndefValue>(src)) { if (src->getType()->isVectorTy()) { if (InsertElementInst *IE = dyn_cast<InsertElementInst>(src)) { unsigned curIdx = (unsigned)cast<ConstantInt>(IE->getOperand(2)) ->getUniqueInteger() .getLimitedValue(); src = IE->getOperand((curIdx == vecIdx) ? 1 : 0); } else if (ShuffleVectorInst *SV = dyn_cast<ShuffleVectorInst>(src)) { int newIdx = SV->getMaskValue(vecIdx); if (newIdx < 0) return UndefValue::get(srcTy); vecIdx = (unsigned)newIdx; src = SV->getOperand(0); unsigned numElt = src->getType()->getVectorNumElements(); if (numElt <= vecIdx) { vecIdx -= numElt; src = SV->getOperand(1); } } else { return UndefValue::get(srcTy); // Didn't find it. } } else { if (ExtractElementInst *EE = dyn_cast<ExtractElementInst>(src)) { vecIdx = (unsigned)cast<ConstantInt>(EE->getIndexOperand()) ->getUniqueInteger() .getLimitedValue(); src = EE->getVectorOperand(); } else if (hlsl::dxilutil::IsConvergentMarker(src)) { src = hlsl::dxilutil::GetConvergentSource(src); } else { break; // Found it. } } } return src; } // Finds corresponding inputs, calls translation for each, and returns // resulting vector or scalar. // Uses functor that takes (inputElemID, rowIdx, colIdx), and returns // translation for one input scalar. Value *TranslateEvalHelper( CallInst *CI, Value *val, IRBuilder<> &Builder, std::function<Value *(Value *, Value *, Value *)> fnTranslateScalarInput) { Type *Ty = CI->getType(); Value *result = UndefValue::get(Ty); if (Ty->isVectorTy()) { for (unsigned i = 0; i < Ty->getVectorNumElements(); ++i) { Value *InputEl = FindScalarSource(val, i); if (!IsValidLoadInput(InputEl)) { dxilutil::EmitErrorOnInstruction( CI, "attribute evaluation can only be done " "on values taken directly from inputs."); return result; } CallInst *loadInput = cast<CallInst>(InputEl); Value *inputElemID = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputIDOpIdx); Value *rowIdx = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputRowOpIdx); Value *colIdx = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputColOpIdx); Value *Elt = fnTranslateScalarInput(inputElemID, rowIdx, colIdx); result = Builder.CreateInsertElement(result, Elt, i); } } else { Value *InputEl = FindScalarSource(val); if (!IsValidLoadInput(InputEl)) { dxilutil::EmitErrorOnInstruction(CI, "attribute evaluation can only be done " "on values taken directly from inputs."); return result; } CallInst *loadInput = cast<CallInst>(InputEl); Value *inputElemID = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputIDOpIdx); Value *rowIdx = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputRowOpIdx); Value *colIdx = loadInput->getArgOperand(DXIL::OperandIndex::kLoadInputColOpIdx); result = fnTranslateScalarInput(inputElemID, rowIdx, colIdx); } return result; } Value *TranslateEvalSample(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *sampleIdx 
= CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); OP::OpCode opcode = OP::OpCode::EvalSampleIndex; Value *opArg = hlslOP->GetU32Const((unsigned)opcode); Function *evalFunc = hlslOP->GetOpFunc(opcode, CI->getType()->getScalarType()); return TranslateEvalHelper( CI, val, Builder, [&](Value *inputElemID, Value *rowIdx, Value *colIdx) -> Value * { return Builder.CreateCall( evalFunc, {opArg, inputElemID, rowIdx, colIdx, sampleIdx}); }); } Value *TranslateEvalSnapped(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *offset = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *offsetX = Builder.CreateExtractElement(offset, (uint64_t)0); Value *offsetY = Builder.CreateExtractElement(offset, 1); OP::OpCode opcode = OP::OpCode::EvalSnapped; Value *opArg = hlslOP->GetU32Const((unsigned)opcode); Function *evalFunc = hlslOP->GetOpFunc(opcode, CI->getType()->getScalarType()); return TranslateEvalHelper( CI, val, Builder, [&](Value *inputElemID, Value *rowIdx, Value *colIdx) -> Value * { return Builder.CreateCall( evalFunc, {opArg, inputElemID, rowIdx, colIdx, offsetX, offsetY}); }); } Value *TranslateEvalCentroid(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(DXIL::OperandIndex::kUnarySrc0OpIdx); IRBuilder<> Builder(CI); OP::OpCode opcode = OP::OpCode::EvalCentroid; Value *opArg = hlslOP->GetU32Const((unsigned)opcode); Function *evalFunc = hlslOP->GetOpFunc(opcode, CI->getType()->getScalarType()); return TranslateEvalHelper( CI, val, Builder, [&](Value *inputElemID, Value *rowIdx, Value *colIdx) -> Value * { return Builder.CreateCall(evalFunc, {opArg, inputElemID, rowIdx, colIdx}); }); } /* HLSL: bool RWDispatchNodeInputRecord<recordType>::FinishedCrossGroupSharing() DXIL: i1 @dx.op.finishedCrossGroupSharing(i32 %Opcode, %dx.types.NodeRecordHandle %NodeInputRecordHandle) */ Value *TranslateNodeFinishedCrossGroupSharing( CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Function *dxilFunc = OP->GetOpFunc(op, Type::getVoidTy(CI->getContext())); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); DXASSERT_NOMSG(handle->getType() == OP->GetNodeRecordHandleType()); Value *opArg = OP->GetU32Const((unsigned)op); IRBuilder<> Builder(CI); return Builder.CreateCall(dxilFunc, {opArg, handle}); } /* HLSL: bool NodeOutput<recordType>::IsValid() bool EmptyNodeOutput::IsValid() DXIL: i1 @dx.op.nodeOutputIsValid(i32 %Opcode, %dx.types.NodeHandle %NodeOutputHandle) */ Value *TranslateNodeOutputIsValid(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Function *dxilFunc = OP->GetOpFunc(op, Type::getVoidTy(CI->getContext())); Value *opArg = OP->GetU32Const((unsigned)op); IRBuilder<> Builder(CI); return Builder.CreateCall(dxilFunc, {opArg, handle}); } Value *TranslateGetAttributeAtVertex(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper 
*pObjHelper, bool &Translated) { DXASSERT(op == OP::OpCode::AttributeAtVertex, "Wrong opcode to translate"); hlsl::OP *hlslOP = &helper.hlslOP; IRBuilder<> Builder(CI); Value *val = CI->getArgOperand(DXIL::OperandIndex::kBinarySrc0OpIdx); Value *vertexIdx = CI->getArgOperand(DXIL::OperandIndex::kBinarySrc1OpIdx); Value *vertexI8Idx = Builder.CreateTrunc(vertexIdx, Type::getInt8Ty(CI->getContext())); Value *opArg = hlslOP->GetU32Const((unsigned)op); Function *evalFunc = hlslOP->GetOpFunc(op, val->getType()->getScalarType()); return TranslateEvalHelper( CI, val, Builder, [&](Value *inputElemID, Value *rowIdx, Value *colIdx) -> Value * { return Builder.CreateCall( evalFunc, {opArg, inputElemID, rowIdx, colIdx, vertexI8Idx}); }); } /* HLSL: void Barrier(uint MemoryTypeFlags, uint SemanticFlags) void Barrier(Object o, uint SemanticFlags) All UAVs and/or Node Records by types: void @dx.op.barrierByMemoryType(i32 %Opcode, i32 %MemoryTypeFlags, i32 %SemanticFlags) UAV by handle: void @dx.op.barrierByMemoryHandle(i32 %Opcode, %dx.types.Handle %Object, i32 %SemanticFlags) Node Record by handle: void @dx.op.barrierByMemoryHandle(i32 %Opcode, %dx.types.NodeRecordHandle %Object, i32 %SemanticFlags) */ Value *TranslateBarrier(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Value *HandleOrMemoryFlags = CI->getArgOperand(HLOperandIndex::kBarrierMemoryTypeFlagsOpIdx); Value *SemanticFlags = CI->getArgOperand(HLOperandIndex::kBarrierSemanticFlagsOpIdx); IRBuilder<> Builder(CI); if (HandleOrMemoryFlags->getType()->isIntegerTy()) { op = OP::OpCode::BarrierByMemoryType; } else if (HandleOrMemoryFlags->getType() == OP->GetHandleType()) { op = OP::OpCode::BarrierByMemoryHandle; } else if (HandleOrMemoryFlags->getType() == OP->GetNodeRecordHandleType()) { op = OP::OpCode::BarrierByNodeRecordHandle; } else { DXASSERT(false, "Shouldn't get here"); } Function *dxilFunc = OP->GetOpFunc(op, CI->getType()); Constant *opArg = OP->GetU32Const((unsigned)op); Value *args[] = {opArg, HandleOrMemoryFlags, SemanticFlags}; Builder.CreateCall(dxilFunc, args); return nullptr; } Value *TranslateGetGroupOrThreadNodeOutputRecords( CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool isPerThreadRecord, bool &Translated) { IRBuilder<> Builder(CI); hlsl::OP *OP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Function *dxilFunc = OP->GetOpFunc(op, Builder.getVoidTy()); Value *opArg = OP->GetU32Const((unsigned)op); Value *count = CI->getArgOperand(HLOperandIndex::kAllocateRecordNumRecordsIdx); Value *perThread = OP->GetI1Const(isPerThreadRecord); Value *args[] = {opArg, handle, count, perThread}; return Builder.CreateCall(dxilFunc, args); } /* HLSL: GroupNodeOutputRecords<recordType> NodeOutput<recordType>::GetGroupNodeOutputRecords(uint numRecords); DXIL: %dx.types.NodeRecordHandle @dx.op.allocateNodeOutputRecords(i32 %Opcode, %dx.types.NodeHandle %NodeOutputHandle, i32 %NumRecords, i1 %PerThread) */ Value * TranslateGetGroupNodeOutputRecords(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { return TranslateGetGroupOrThreadNodeOutputRecords( CI, IOP, op, helper, pObjHelper, /* isPerThreadRecord */ false, Translated); } /* HLSL: ThreadNodeOutputRecords<recordType> NodeOutput<recordType>::GetThreadNodeOutputRecords(uint 
numRecords) DXIL: %dx.types.NodeRecordHandle @dx.op.allocateNodeOutputRecords(i32 %Opcode, %dx.types.NodeHandle %NodeOutputHandle, i32 %NumRecords, i1 %PerThread) */ Value *TranslateGetThreadNodeOutputRecords( CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { return TranslateGetGroupOrThreadNodeOutputRecords( CI, IOP, op, helper, pObjHelper, /* isPerThreadRecord */ true, Translated); } /* HLSL: uint EmptyNodeInput::Count() uint GroupNodeInputRecords<recordType>::Count() uint RWGroupNodeInputRecords<recordType>::Count() DXIL: i32 @dx.op.getInputRecordCount(i32 %Opcode, %dx.types.NodeRecordHandle %NodeInputHandle) */ Value * TranslateNodeGetInputRecordCount(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); DXASSERT_NOMSG(handle->getType() == OP->GetNodeRecordHandleType()); Function *dxilFunc = OP->GetOpFunc(op, Type::getVoidTy(CI->getContext())); Value *opArg = OP->GetU32Const((unsigned)op); Value *args[] = {opArg, handle}; IRBuilder<> Builder(CI); return Builder.CreateCall(dxilFunc, args); } Value *TrivialNoArgOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *Ty = Type::getVoidTy(CI->getContext()); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg}; IRBuilder<> Builder(CI); Value *dxilOp = TrivialDxilOperation(opcode, args, Ty, Ty, hlslOP, Builder); return dxilOp; } Value *TrivialNoArgWithRetOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *Ty = CI->getType(); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg}; IRBuilder<> Builder(CI); Value *dxilOp = TrivialDxilOperation(opcode, args, Ty, Ty, hlslOP, Builder); return dxilOp; } Value *TranslateGetRTSamplePos(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; OP::OpCode opcode = OP::OpCode::RenderTargetGetSamplePosition; IRBuilder<> Builder(CI); Type *Ty = Type::getVoidTy(CI->getContext()); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *args[] = {opArg, val}; Value *samplePos = TrivialDxilOperation(opcode, args, Ty, Ty, hlslOP, Builder); Value *result = UndefValue::get(CI->getType()); Value *samplePosX = Builder.CreateExtractValue(samplePos, 0); Value *samplePosY = Builder.CreateExtractValue(samplePos, 1); result = Builder.CreateInsertElement(result, samplePosX, (uint64_t)0); result = Builder.CreateInsertElement(result, samplePosY, 1); return result; } // val QuadReadLaneAt(val, uint); Value *TranslateQuadReadLaneAt(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr, CI->getOperand(1), CI->getOperand(2)}; return TrivialDxilOperation(DXIL::OpCode::QuadReadLaneAt, refArgs, CI->getOperand(1)->getType(), CI, hlslOP); } // Quad intrinsics of the form fn(val,QuadOpKind)->val 
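/*
HLSL: bool QuadAny(bool expr) / bool QuadAll(bool expr)
DXIL: i1 @dx.op.quadVote(i32 %Opcode, i1 %cond, i8 %op)
(a sketch of the lowering below; %op is an i8 DXIL::QuadVoteOpKind)
*/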
Value *TranslateQuadAnyAll(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode,
                           HLOperationLowerHelper &helper,
                           HLObjectOperationLowerHelper *pObjHelper,
                           bool &Translated) {
  hlsl::OP *hlslOP = &helper.hlslOP;
  DXIL::QuadVoteOpKind opKind;
  switch (IOP) {
  case IntrinsicOp::IOP_QuadAll:
    opKind = DXIL::QuadVoteOpKind::All;
    break;
  case IntrinsicOp::IOP_QuadAny:
    opKind = DXIL::QuadVoteOpKind::Any;
    break;
  default:
    llvm_unreachable(
        "QuadAny/QuadAll translation called with wrong instruction");
  }
  Constant *OpArg = hlslOP->GetI8Const((unsigned)opKind);
  Value *refArgs[] = {nullptr, CI->getOperand(1), OpArg};
  return TrivialDxilOperation(DXIL::OpCode::QuadVote, refArgs,
                              CI->getOperand(1)->getType(), CI, hlslOP);
}

// Quad intrinsics of the form fn(val,QuadOpKind)->val
Value *TranslateQuadReadAcross(CallInst *CI, IntrinsicOp IOP,
                               OP::OpCode opcode,
                               HLOperationLowerHelper &helper,
                               HLObjectOperationLowerHelper *pObjHelper,
                               bool &Translated) {
  hlsl::OP *hlslOP = &helper.hlslOP;
  DXIL::QuadOpKind opKind;
  switch (IOP) {
  case IntrinsicOp::IOP_QuadReadAcrossX:
    opKind = DXIL::QuadOpKind::ReadAcrossX;
    break;
  case IntrinsicOp::IOP_QuadReadAcrossY:
    opKind = DXIL::QuadOpKind::ReadAcrossY;
    break;
  default:
    DXASSERT_NOMSG(IOP == IntrinsicOp::IOP_QuadReadAcrossDiagonal);
    LLVM_FALLTHROUGH;
  case IntrinsicOp::IOP_QuadReadAcrossDiagonal:
    opKind = DXIL::QuadOpKind::ReadAcrossDiagonal;
    break;
  }
  Constant *OpArg = hlslOP->GetI8Const((unsigned)opKind);
  Value *refArgs[] = {nullptr, CI->getOperand(1), OpArg};
  return TrivialDxilOperation(DXIL::OpCode::QuadOp, refArgs,
                              CI->getOperand(1)->getType(), CI, hlslOP);
}

// WaveAllEqual(val<n>)->bool<n>
Value *TranslateWaveAllEqual(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode,
                             HLOperationLowerHelper &helper,
                             HLObjectOperationLowerHelper *pObjHelper,
                             bool &Translated) {
  hlsl::OP *hlslOP = &helper.hlslOP;
  Value *src = CI->getArgOperand(HLOperandIndex::kWaveAllEqualValueOpIdx);
  IRBuilder<> Builder(CI);
  Type *Ty = src->getType();
  Type *RetTy = Type::getInt1Ty(CI->getContext());
  if (Ty->isVectorTy())
    RetTy = VectorType::get(RetTy, Ty->getVectorNumElements());
  Constant *opArg =
      hlslOP->GetU32Const((unsigned)DXIL::OpCode::WaveActiveAllEqual);
  Value *args[] = {opArg, src};
  return TrivialDxilOperation(DXIL::OpCode::WaveActiveAllEqual, args, Ty,
                              RetTy, hlslOP, Builder);
}

// WaveMatch(val<n>)->uint4
Value *TranslateWaveMatch(CallInst *CI, IntrinsicOp IOP, OP::OpCode Opc,
                          HLOperationLowerHelper &Helper,
                          HLObjectOperationLowerHelper *ObjHelper,
                          bool &Translated) {
  hlsl::OP *Op = &Helper.hlslOP;
  IRBuilder<> Builder(CI);
  // Generate a dx.op.waveMatch call for each scalar in the input, and perform
  // a bitwise AND between each result to derive the final bitmask in the case
  // of vector inputs.
  // (1) Collect the list of all scalar inputs (e.g. decompose vectors)
  SmallVector<Value *, 4> ScalarInputs;
  Value *Val = CI->getArgOperand(1);
  Type *ValTy = Val->getType();
  Type *EltTy = ValTy->getScalarType();
  if (ValTy->isVectorTy()) {
    for (uint64_t i = 0, e = ValTy->getVectorNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractElement(Val, i);
      ScalarInputs.push_back(Elt);
    }
  } else {
    ScalarInputs.push_back(Val);
  }
  Value *Res = nullptr;
  Constant *OpcArg = Op->GetU32Const((unsigned)DXIL::OpCode::WaveMatch);
  Value *Fn = Op->GetOpFunc(OP::OpCode::WaveMatch, EltTy);
  // (2) For each scalar, emit a call to dx.op.waveMatch. If this is not the
  // first scalar, then AND the result with the accumulator.
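  // E.g. a uint2 key emits two dx.op.waveMatch calls, each yielding four i32
  // mask components; ANDing the two results componentwise keeps only lanes
  // whose keys match in all components.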
for (unsigned i = 0, e = ScalarInputs.size(); i != e; ++i) { Value *Args[] = {OpcArg, ScalarInputs[i]}; Value *Call = Builder.CreateCall(Fn, Args); if (Res) { // Generate bitwise AND of the components for (unsigned j = 0; j != 4; ++j) { Value *ResVal = Builder.CreateExtractValue(Res, j); Value *CallVal = Builder.CreateExtractValue(Call, j); Value *And = Builder.CreateAnd(ResVal, CallVal); Res = Builder.CreateInsertValue(Res, And, j); } } else { Res = Call; } } // (3) Convert the final aggregate into a vector to make the types match Value *ResVec = UndefValue::get(CI->getType()); for (unsigned i = 0; i != 4; ++i) { Value *Elt = Builder.CreateExtractValue(Res, i); ResVec = Builder.CreateInsertElement(ResVec, Elt, i); } return ResVec; } // Wave intrinsics of the form fn(valA)->valB, where no overloading takes place Value *TranslateWaveA2B(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr, CI->getOperand(1)}; return TrivialDxilOperation(opcode, refArgs, helper.voidTy, CI, hlslOP); } // Wave ballot intrinsic. Value *TranslateWaveBallot(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { // The high-level operation is uint4 ballot(i1). // The DXIL operation is struct.u4 ballot(i1). // To avoid updating users with more than a simple replace, we translate into // a call into struct.u4, then reassemble the vector. // Scalarization and constant propagation take care of cleanup. IRBuilder<> B(CI); // Make the DXIL call itself. hlsl::OP *hlslOP = &helper.hlslOP; Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Value *refArgs[] = {opArg, CI->getOperand(1)}; Function *dxilFunc = hlslOP->GetOpFunc(opcode, Type::getVoidTy(CI->getContext())); Value *dxilVal = B.CreateCall(dxilFunc, refArgs, hlslOP->GetOpCodeName(opcode)); // Assign from the call results into a vector. Type *ResTy = CI->getType(); DXASSERT_NOMSG(ResTy->isVectorTy() && ResTy->getVectorNumElements() == 4); DXASSERT_NOMSG(dxilVal->getType()->isStructTy() && dxilVal->getType()->getNumContainedTypes() == 4); // 'x' component is the first vector element, highest bits. Value *ResVal = llvm::UndefValue::get(ResTy); for (unsigned Idx = 0; Idx < 4; ++Idx) { ResVal = B.CreateInsertElement( ResVal, B.CreateExtractValue(dxilVal, ArrayRef<unsigned>(Idx)), Idx); } return ResVal; } static bool WaveIntrinsicNeedsSign(OP::OpCode opcode) { return opcode == OP::OpCode::WaveActiveOp || opcode == OP::OpCode::WavePrefixOp; } static unsigned WaveIntrinsicToSignedOpKind(IntrinsicOp IOP) { if (IOP == IntrinsicOp::IOP_WaveActiveUMax || IOP == IntrinsicOp::IOP_WaveActiveUMin || IOP == IntrinsicOp::IOP_WaveActiveUSum || IOP == IntrinsicOp::IOP_WaveActiveUProduct || IOP == IntrinsicOp::IOP_WaveMultiPrefixUProduct || IOP == IntrinsicOp::IOP_WaveMultiPrefixUSum || IOP == IntrinsicOp::IOP_WavePrefixUSum || IOP == IntrinsicOp::IOP_WavePrefixUProduct) return (unsigned)DXIL::SignedOpKind::Unsigned; return (unsigned)DXIL::SignedOpKind::Signed; } static unsigned WaveIntrinsicToOpKind(IntrinsicOp IOP) { switch (IOP) { // Bit operations. case IntrinsicOp::IOP_WaveActiveBitOr: return (unsigned)DXIL::WaveBitOpKind::Or; case IntrinsicOp::IOP_WaveActiveBitAnd: return (unsigned)DXIL::WaveBitOpKind::And; case IntrinsicOp::IOP_WaveActiveBitXor: return (unsigned)DXIL::WaveBitOpKind::Xor; // Prefix operations. 
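  // (Signed and unsigned variants share one kind value here; signedness is
  // reported separately by WaveIntrinsicToSignedOpKind above.)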
case IntrinsicOp::IOP_WavePrefixSum: case IntrinsicOp::IOP_WavePrefixUSum: return (unsigned)DXIL::WaveOpKind::Sum; case IntrinsicOp::IOP_WavePrefixProduct: case IntrinsicOp::IOP_WavePrefixUProduct: return (unsigned)DXIL::WaveOpKind::Product; // Numeric operations. case IntrinsicOp::IOP_WaveActiveMax: case IntrinsicOp::IOP_WaveActiveUMax: return (unsigned)DXIL::WaveOpKind::Max; case IntrinsicOp::IOP_WaveActiveMin: case IntrinsicOp::IOP_WaveActiveUMin: return (unsigned)DXIL::WaveOpKind::Min; case IntrinsicOp::IOP_WaveActiveSum: case IntrinsicOp::IOP_WaveActiveUSum: return (unsigned)DXIL::WaveOpKind::Sum; case IntrinsicOp::IOP_WaveActiveProduct: case IntrinsicOp::IOP_WaveActiveUProduct: // MultiPrefix operations case IntrinsicOp::IOP_WaveMultiPrefixBitAnd: return (unsigned)DXIL::WaveMultiPrefixOpKind::And; case IntrinsicOp::IOP_WaveMultiPrefixBitOr: return (unsigned)DXIL::WaveMultiPrefixOpKind::Or; case IntrinsicOp::IOP_WaveMultiPrefixBitXor: return (unsigned)DXIL::WaveMultiPrefixOpKind::Xor; case IntrinsicOp::IOP_WaveMultiPrefixProduct: case IntrinsicOp::IOP_WaveMultiPrefixUProduct: return (unsigned)DXIL::WaveMultiPrefixOpKind::Product; case IntrinsicOp::IOP_WaveMultiPrefixSum: case IntrinsicOp::IOP_WaveMultiPrefixUSum: return (unsigned)DXIL::WaveMultiPrefixOpKind::Sum; default: DXASSERT(IOP == IntrinsicOp::IOP_WaveActiveProduct || IOP == IntrinsicOp::IOP_WaveActiveUProduct, "else caller passed incorrect value"); return (unsigned)DXIL::WaveOpKind::Product; } } // Wave intrinsics of the form fn(valA)->valA Value *TranslateWaveA2A(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Constant *kindValInt = hlslOP->GetI8Const(WaveIntrinsicToOpKind(IOP)); Constant *signValInt = hlslOP->GetI8Const(WaveIntrinsicToSignedOpKind(IOP)); Value *refArgs[] = {nullptr, CI->getOperand(1), kindValInt, signValInt}; unsigned refArgCount = _countof(refArgs); if (!WaveIntrinsicNeedsSign(opcode)) refArgCount--; return TrivialDxilOperation(opcode, llvm::ArrayRef<Value *>(refArgs, refArgCount), CI->getOperand(1)->getType(), CI, hlslOP); } // WaveMultiPrefixOP(val<n>, mask) -> val<n> Value *TranslateWaveMultiPrefix(CallInst *CI, IntrinsicOp IOP, OP::OpCode Opc, HLOperationLowerHelper &Helper, HLObjectOperationLowerHelper *ObjHelper, bool &Translated) { hlsl::OP *Op = &Helper.hlslOP; Constant *KindValInt = Op->GetI8Const(WaveIntrinsicToOpKind(IOP)); Constant *SignValInt = Op->GetI8Const(WaveIntrinsicToSignedOpKind(IOP)); // Decompose mask into scalars IRBuilder<> Builder(CI); Value *Mask = CI->getArgOperand(2); Value *Mask0 = Builder.CreateExtractElement(Mask, (uint64_t)0); Value *Mask1 = Builder.CreateExtractElement(Mask, (uint64_t)1); Value *Mask2 = Builder.CreateExtractElement(Mask, (uint64_t)2); Value *Mask3 = Builder.CreateExtractElement(Mask, (uint64_t)3); Value *Args[] = {nullptr, CI->getOperand(1), Mask0, Mask1, Mask2, Mask3, KindValInt, SignValInt}; return TrivialDxilOperation(Opc, Args, CI->getOperand(1)->getType(), CI, Op); } // WaveMultiPrefixBitCount(i1, mask) -> i32 Value *TranslateWaveMultiPrefixBitCount(CallInst *CI, IntrinsicOp IOP, OP::OpCode Opc, HLOperationLowerHelper &Helper, HLObjectOperationLowerHelper *ObjHelper, bool &Translated) { hlsl::OP *Op = &Helper.hlslOP; // Decompose mask into scalars IRBuilder<> Builder(CI); Value *Mask = CI->getArgOperand(2); Value *Mask0 = Builder.CreateExtractElement(Mask, (uint64_t)0); Value *Mask1 = Builder.CreateExtractElement(Mask, 
(uint64_t)1); Value *Mask2 = Builder.CreateExtractElement(Mask, (uint64_t)2); Value *Mask3 = Builder.CreateExtractElement(Mask, (uint64_t)3); Value *Args[] = {nullptr, CI->getOperand(1), Mask0, Mask1, Mask2, Mask3}; return TrivialDxilOperation(Opc, Args, Helper.voidTy, CI, Op); } // Wave intrinsics of the form fn()->val Value *TranslateWaveToVal(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr}; return TrivialDxilOperation(opcode, refArgs, helper.voidTy, CI, hlslOP); } // Wave intrinsics of the form fn(val,lane)->val Value *TranslateWaveReadLaneAt(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr, CI->getOperand(1), CI->getOperand(2)}; return TrivialDxilOperation(DXIL::OpCode::WaveReadLaneAt, refArgs, CI->getOperand(1)->getType(), CI, hlslOP); } // Wave intrinsics of the form fn(val)->val Value *TranslateWaveReadLaneFirst(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr, CI->getOperand(1)}; return TrivialDxilOperation(DXIL::OpCode::WaveReadLaneFirst, refArgs, CI->getOperand(1)->getType(), CI, hlslOP); } Value *TranslateAbs(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *pOverloadTy = CI->getType()->getScalarType(); if (pOverloadTy->isFloatingPointTy()) { Value *refArgs[] = {nullptr, CI->getOperand(1)}; return TrivialDxilOperation(DXIL::OpCode::FAbs, refArgs, CI->getType(), CI, hlslOP); } else { Value *src = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); Value *neg = Builder.CreateNeg(src); return TrivialDxilBinaryOperation(DXIL::OpCode::IMax, src, neg, hlslOP, Builder); } } Value *TranslateUAbs(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { return CI->getOperand(HLOperandIndex::kUnaryOpSrc0Idx); // No-op } Value *GenerateCmpNEZero(Value *val, IRBuilder<> Builder) { Type *Ty = val->getType(); Type *EltTy = Ty->getScalarType(); Constant *zero = nullptr; if (EltTy->isFloatingPointTy()) zero = ConstantFP::get(EltTy, 0); else zero = ConstantInt::get(EltTy, 0); if (Ty != EltTy) { zero = ConstantVector::getSplat(Ty->getVectorNumElements(), zero); } if (EltTy->isFloatingPointTy()) return Builder.CreateFCmpUNE(val, zero); else return Builder.CreateICmpNE(val, zero); } Value *TranslateAllForValue(Value *val, IRBuilder<> &Builder) { Value *cond = GenerateCmpNEZero(val, Builder); Type *Ty = val->getType(); Type *EltTy = Ty->getScalarType(); if (Ty != EltTy) { Value *Result = Builder.CreateExtractElement(cond, (uint64_t)0); for (unsigned i = 1; i < Ty->getVectorNumElements(); i++) { Value *Elt = Builder.CreateExtractElement(cond, i); Result = Builder.CreateAnd(Result, Elt); } return Result; } else return cond; } Value *TranslateAll(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); return 
TranslateAllForValue(val, Builder); } Value *TranslateAny(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); Value *cond = GenerateCmpNEZero(val, Builder); Type *Ty = val->getType(); Type *EltTy = Ty->getScalarType(); if (Ty != EltTy) { Value *Result = Builder.CreateExtractElement(cond, (uint64_t)0); for (unsigned i = 1; i < Ty->getVectorNumElements(); i++) { Value *Elt = Builder.CreateExtractElement(cond, i); Result = Builder.CreateOr(Result, Elt); } return Result; } else return cond; } Value *TranslateBitcast(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Type *Ty = CI->getType(); Value *op = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); return Builder.CreateBitCast(op, Ty); } Value *TranslateDoubleAsUint(Value *x, Value *lo, Value *hi, IRBuilder<> &Builder, hlsl::OP *hlslOP) { Type *Ty = x->getType(); Type *outTy = lo->getType()->getPointerElementType(); DXIL::OpCode opcode = DXIL::OpCode::SplitDouble; Function *dxilFunc = hlslOP->GetOpFunc(opcode, Ty->getScalarType()); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); if (Ty->isVectorTy()) { Value *retValLo = llvm::UndefValue::get(outTy); Value *retValHi = llvm::UndefValue::get(outTy); unsigned vecSize = Ty->getVectorNumElements(); for (unsigned i = 0; i < vecSize; i++) { Value *Elt = Builder.CreateExtractElement(x, i); Value *EltOP = Builder.CreateCall(dxilFunc, {opArg, Elt}, hlslOP->GetOpCodeName(opcode)); Value *EltLo = Builder.CreateExtractValue(EltOP, 0); retValLo = Builder.CreateInsertElement(retValLo, EltLo, i); Value *EltHi = Builder.CreateExtractValue(EltOP, 1); retValHi = Builder.CreateInsertElement(retValHi, EltHi, i); } Builder.CreateStore(retValLo, lo); Builder.CreateStore(retValHi, hi); } else { Value *retVal = Builder.CreateCall(dxilFunc, {opArg, x}, hlslOP->GetOpCodeName(opcode)); Value *retValLo = Builder.CreateExtractValue(retVal, 0); Value *retValHi = Builder.CreateExtractValue(retVal, 1); Builder.CreateStore(retValLo, lo); Builder.CreateStore(retValHi, hi); } return nullptr; } Value *TranslateAsUint(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { if (CI->getNumArgOperands() == 2) { return TranslateBitcast(CI, IOP, opcode, helper, pObjHelper, Translated); } else { DXASSERT_NOMSG(CI->getNumArgOperands() == 4); hlsl::OP *hlslOP = &helper.hlslOP; Value *x = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); DXASSERT_NOMSG(x->getType()->getScalarType()->isDoubleTy()); Value *lo = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *hi = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); IRBuilder<> Builder(CI); return TranslateDoubleAsUint(x, lo, hi, Builder, hlslOP); } } Value *TranslateAsDouble(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *y = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); IRBuilder<> Builder(CI); return TrivialDxilOperation(opcode, {opArg, x, y}, CI->getType(), CI->getType(), hlslOP, Builder); } 
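// atan2 lowering (below): t = atan(y/x) via OP::OpCode::Atan, then the result
// is quadrant-corrected with selects; a summary of the code that follows:
//   x > 0            -> t
//   x < 0 && y >= 0  -> t + pi
//   x < 0 && y < 0   -> t - pi
//   x == 0 && y < 0  -> -pi/2
//   x == 0 && y >= 0 -> pi/2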
Value *TranslateAtan2(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *y = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *tan = Builder.CreateFDiv(y, x); Value *atan = TrivialDxilUnaryOperation(OP::OpCode::Atan, tan, hlslOP, Builder); // Modify atan result based on https://en.wikipedia.org/wiki/Atan2. Type *Ty = x->getType(); Constant *pi = ConstantFP::get(Ty->getScalarType(), M_PI); Constant *halfPi = ConstantFP::get(Ty->getScalarType(), M_PI / 2); Constant *negHalfPi = ConstantFP::get(Ty->getScalarType(), -M_PI / 2); Constant *zero = ConstantFP::get(Ty->getScalarType(), 0); if (Ty->isVectorTy()) { unsigned vecSize = Ty->getVectorNumElements(); pi = ConstantVector::getSplat(vecSize, pi); halfPi = ConstantVector::getSplat(vecSize, halfPi); negHalfPi = ConstantVector::getSplat(vecSize, negHalfPi); zero = ConstantVector::getSplat(vecSize, zero); } Value *atanAddPi = Builder.CreateFAdd(atan, pi); Value *atanSubPi = Builder.CreateFSub(atan, pi); // x > 0 -> atan. Value *result = atan; Value *xLt0 = Builder.CreateFCmpOLT(x, zero); Value *xEq0 = Builder.CreateFCmpOEQ(x, zero); Value *yGe0 = Builder.CreateFCmpOGE(y, zero); Value *yLt0 = Builder.CreateFCmpOLT(y, zero); // x < 0, y >= 0 -> atan + pi. Value *xLt0AndyGe0 = Builder.CreateAnd(xLt0, yGe0); result = Builder.CreateSelect(xLt0AndyGe0, atanAddPi, result); // x < 0, y < 0 -> atan - pi. Value *xLt0AndYLt0 = Builder.CreateAnd(xLt0, yLt0); result = Builder.CreateSelect(xLt0AndYLt0, atanSubPi, result); // x == 0, y < 0 -> -pi/2 Value *xEq0AndYLt0 = Builder.CreateAnd(xEq0, yLt0); result = Builder.CreateSelect(xEq0AndYLt0, negHalfPi, result); // x == 0, y > 0 -> pi/2 Value *xEq0AndYGe0 = Builder.CreateAnd(xEq0, yGe0); result = Builder.CreateSelect(xEq0AndYGe0, halfPi, result); return result; } Value *TranslateClamp(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *Ty = CI->getType(); Type *EltTy = Ty->getScalarType(); DXIL::OpCode maxOp = DXIL::OpCode::FMax; DXIL::OpCode minOp = DXIL::OpCode::FMin; if (IOP == IntrinsicOp::IOP_uclamp) { maxOp = DXIL::OpCode::UMax; minOp = DXIL::OpCode::UMin; } else if (EltTy->isIntegerTy()) { maxOp = DXIL::OpCode::IMax; minOp = DXIL::OpCode::IMin; } Value *x = CI->getArgOperand(HLOperandIndex::kClampOpXIdx); Value *maxVal = CI->getArgOperand(HLOperandIndex::kClampOpMaxIdx); Value *minVal = CI->getArgOperand(HLOperandIndex::kClampOpMinIdx); IRBuilder<> Builder(CI); // min(max(x, minVal), maxVal). 
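  // E.g. clamp(x, 0.0f, 1.0f) lowers to FMin(FMax(x, 0.0f), 1.0f); the opcode
  // pair picked above (FMax/FMin, IMax/IMin, or UMax/UMin) follows the
  // element type and the uclamp variant.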
Value *maxXMinVal = TrivialDxilBinaryOperation(maxOp, x, minVal, hlslOP, Builder); return TrivialDxilBinaryOperation(minOp, maxXMinVal, maxVal, hlslOP, Builder); } Value *TranslateClip(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Function *discard = hlslOP->GetOpFunc(OP::OpCode::Discard, Type::getVoidTy(CI->getContext())); IRBuilder<> Builder(CI); Value *cond = nullptr; Value *arg = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); if (VectorType *VT = dyn_cast<VectorType>(arg->getType())) { Value *elt = Builder.CreateExtractElement(arg, (uint64_t)0); cond = Builder.CreateFCmpOLT(elt, hlslOP->GetFloatConst(0)); for (unsigned i = 1; i < VT->getNumElements(); i++) { Value *elt = Builder.CreateExtractElement(arg, i); Value *eltCond = Builder.CreateFCmpOLT(elt, hlslOP->GetFloatConst(0)); cond = Builder.CreateOr(cond, eltCond); } } else cond = Builder.CreateFCmpOLT(arg, hlslOP->GetFloatConst(0)); /*If discard condition evaluates to false at compile-time, then don't emit the discard instruction.*/ if (ConstantInt *constCond = dyn_cast<ConstantInt>(cond)) if (!constCond->getLimitedValue()) return nullptr; Constant *opArg = hlslOP->GetU32Const((unsigned)OP::OpCode::Discard); Builder.CreateCall(discard, {opArg, cond}); return nullptr; } Value *TranslateCross(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { VectorType *VT = cast<VectorType>(CI->getType()); DXASSERT_NOMSG(VT->getNumElements() == 3); Value *op0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *op1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *op0_x = Builder.CreateExtractElement(op0, (uint64_t)0); Value *op0_y = Builder.CreateExtractElement(op0, 1); Value *op0_z = Builder.CreateExtractElement(op0, 2); Value *op1_x = Builder.CreateExtractElement(op1, (uint64_t)0); Value *op1_y = Builder.CreateExtractElement(op1, 1); Value *op1_z = Builder.CreateExtractElement(op1, 2); auto MulSub = [&](Value *x0, Value *y0, Value *x1, Value *y1) -> Value * { Value *xy = Builder.CreateFMul(x0, y1); Value *yx = Builder.CreateFMul(y0, x1); return Builder.CreateFSub(xy, yx); }; Value *yz_zy = MulSub(op0_y, op0_z, op1_y, op1_z); Value *zx_xz = MulSub(op0_z, op0_x, op1_z, op1_x); Value *xy_yx = MulSub(op0_x, op0_y, op1_x, op1_y); Value *cross = UndefValue::get(VT); cross = Builder.CreateInsertElement(cross, yz_zy, (uint64_t)0); cross = Builder.CreateInsertElement(cross, zx_xz, 1); cross = Builder.CreateInsertElement(cross, xy_yx, 2); return cross; } Value *TranslateDegrees(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { IRBuilder<> Builder(CI); Type *Ty = CI->getType(); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); // 180/pi. 
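  // degrees(x) = x * (180/pi); e.g. degrees(pi/2) == 90. The constant is
  // splatted below so vector inputs scale componentwise.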
Constant *toDegreeConst = ConstantFP::get(Ty->getScalarType(), 180 / M_PI); if (Ty != Ty->getScalarType()) { toDegreeConst = ConstantVector::getSplat(Ty->getVectorNumElements(), toDegreeConst); } return Builder.CreateFMul(toDegreeConst, val); } Value *TranslateDst(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Type *Ty = src1->getType(); IRBuilder<> Builder(CI); Value *Result = UndefValue::get(Ty); Constant *oneConst = ConstantFP::get(Ty->getScalarType(), 1); // dest.x = 1; Result = Builder.CreateInsertElement(Result, oneConst, (uint64_t)0); // dest.y = src0.y * src1.y; Value *src0_y = Builder.CreateExtractElement(src0, 1); Value *src1_y = Builder.CreateExtractElement(src1, 1); Value *yMuly = Builder.CreateFMul(src0_y, src1_y); Result = Builder.CreateInsertElement(Result, yMuly, 1); // dest.z = src0.z; Value *src0_z = Builder.CreateExtractElement(src0, 2); Result = Builder.CreateInsertElement(Result, src0_z, 2); // dest.w = src1.w; Value *src1_w = Builder.CreateExtractElement(src1, 3); Result = Builder.CreateInsertElement(Result, src1_w, 3); return Result; } Value *TranslateFirstbitHi(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *firstbitHi = TrivialUnaryOperation(CI, IOP, opcode, helper, pObjHelper, Translated); // firstbitHi == -1? -1 : (bitWidth-1 -firstbitHi); IRBuilder<> Builder(CI); Constant *neg1 = Builder.getInt32(-1); Value *src = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = src->getType(); IntegerType *EltTy = cast<IntegerType>(Ty->getScalarType()); Constant *bitWidth = Builder.getInt32(EltTy->getBitWidth() - 1); if (Ty == Ty->getScalarType()) { Value *sub = Builder.CreateSub(bitWidth, firstbitHi); Value *cond = Builder.CreateICmpEQ(neg1, firstbitHi); return Builder.CreateSelect(cond, neg1, sub); } else { Value *result = UndefValue::get(CI->getType()); unsigned vecSize = Ty->getVectorNumElements(); for (unsigned i = 0; i < vecSize; i++) { Value *EltFirstBit = Builder.CreateExtractElement(firstbitHi, i); Value *sub = Builder.CreateSub(bitWidth, EltFirstBit); Value *cond = Builder.CreateICmpEQ(neg1, EltFirstBit); Value *Elt = Builder.CreateSelect(cond, neg1, sub); result = Builder.CreateInsertElement(result, Elt, i); } return result; } } Value *TranslateFirstbitLo(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *firstbitLo = TrivialUnaryOperation(CI, IOP, opcode, helper, pObjHelper, Translated); return firstbitLo; } Value *TranslateLit(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *n_dot_l = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *n_dot_h = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *m = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); IRBuilder<> Builder(CI); Type *Ty = m->getType(); Value *Result = UndefValue::get(VectorType::get(Ty, 4)); // Result = (ambient, diffuse, specular, 1) // ambient = 1. Constant *oneConst = ConstantFP::get(Ty, 1); Result = Builder.CreateInsertElement(Result, oneConst, (uint64_t)0); // Result.w = 1. 
Result = Builder.CreateInsertElement(Result, oneConst, 3); // diffuse = (n_dot_l < 0) ? 0 : n_dot_l. Constant *zeroConst = ConstantFP::get(Ty, 0); Value *nlCmp = Builder.CreateFCmpOLT(n_dot_l, zeroConst); Value *diffuse = Builder.CreateSelect(nlCmp, zeroConst, n_dot_l); Result = Builder.CreateInsertElement(Result, diffuse, 1); // specular = ((n_dot_l < 0) || (n_dot_h < 0)) ? 0: (n_dot_h ^ m). Value *nhCmp = Builder.CreateFCmpOLT(n_dot_h, zeroConst); Value *specCond = Builder.CreateOr(nlCmp, nhCmp); bool isFXCCompatMode = CI->getModule()->GetHLModule().GetHLOptions().bFXCCompatMode; Value *nhPowM = TranslatePowImpl(&helper.hlslOP, Builder, n_dot_h, m, isFXCCompatMode); Value *spec = Builder.CreateSelect(specCond, zeroConst, nhPowM); Result = Builder.CreateInsertElement(Result, spec, 2); return Result; } Value *TranslateRadians(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { IRBuilder<> Builder(CI); Type *Ty = CI->getType(); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); // pi/180. Constant *toRadianConst = ConstantFP::get(Ty->getScalarType(), M_PI / 180); if (Ty != Ty->getScalarType()) { toRadianConst = ConstantVector::getSplat(Ty->getVectorNumElements(), toRadianConst); } return Builder.CreateFMul(toRadianConst, val); } Value *TranslateF16ToF32(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { IRBuilder<> Builder(CI); Value *x = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = CI->getType(); Function *f16tof32 = helper.hlslOP.GetOpFunc(opcode, helper.voidTy); return TrivialDxilOperation( f16tof32, opcode, {Builder.getInt32(static_cast<unsigned>(opcode)), x}, x->getType(), Ty, &helper.hlslOP, Builder); } Value *TranslateF32ToF16(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { IRBuilder<> Builder(CI); Value *x = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = CI->getType(); Function *f32tof16 = helper.hlslOP.GetOpFunc(opcode, helper.voidTy); return TrivialDxilOperation( f32tof16, opcode, {Builder.getInt32(static_cast<unsigned>(opcode)), x}, x->getType(), Ty, &helper.hlslOP, Builder); } Value *TranslateLength(CallInst *CI, Value *val, hlsl::OP *hlslOP) { IRBuilder<> Builder(CI); if (VectorType *VT = dyn_cast<VectorType>(val->getType())) { Value *Elt = Builder.CreateExtractElement(val, (uint64_t)0); unsigned size = VT->getNumElements(); if (size > 1) { Value *Sum = Builder.CreateFMul(Elt, Elt); for (unsigned i = 1; i < size; i++) { Elt = Builder.CreateExtractElement(val, i); Value *Mul = Builder.CreateFMul(Elt, Elt); Sum = Builder.CreateFAdd(Sum, Mul); } DXIL::OpCode sqrt = DXIL::OpCode::Sqrt; Function *dxilSqrt = hlslOP->GetOpFunc(sqrt, VT->getElementType()); Value *opArg = hlslOP->GetI32Const((unsigned)sqrt); return Builder.CreateCall(dxilSqrt, {opArg, Sum}, hlslOP->GetOpCodeName(sqrt)); } else { val = Elt; } } DXIL::OpCode fabs = DXIL::OpCode::FAbs; Function *dxilFAbs = hlslOP->GetOpFunc(fabs, val->getType()); Value *opArg = hlslOP->GetI32Const((unsigned)fabs); return Builder.CreateCall(dxilFAbs, {opArg, val}, hlslOP->GetOpCodeName(fabs)); } Value *TranslateLength(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = 
CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); return TranslateLength(CI, val, hlslOP); } Value *TranslateModF(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *outIntPtr = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *intP = TrivialDxilUnaryOperation(OP::OpCode::Round_z, val, hlslOP, Builder); Value *fracP = Builder.CreateFSub(val, intP); Builder.CreateStore(intP, outIntPtr); return fracP; } Value *TranslateDistance(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *sub = Builder.CreateFSub(src0, src1); return TranslateLength(CI, sub, hlslOP); } Value *TranslateExp(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; IRBuilder<> Builder(CI); Type *Ty = CI->getType(); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Constant *log2eConst = ConstantFP::get(Ty->getScalarType(), M_LOG2E); if (Ty != Ty->getScalarType()) { log2eConst = ConstantVector::getSplat(Ty->getVectorNumElements(), log2eConst); } val = Builder.CreateFMul(log2eConst, val); Value *exp = TrivialDxilUnaryOperation(OP::OpCode::Exp, val, hlslOP, Builder); return exp; } Value *TranslateLog(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; IRBuilder<> Builder(CI); Type *Ty = CI->getType(); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Constant *ln2Const = ConstantFP::get(Ty->getScalarType(), M_LN2); if (Ty != Ty->getScalarType()) { ln2Const = ConstantVector::getSplat(Ty->getVectorNumElements(), ln2Const); } Value *log = TrivialDxilUnaryOperation(OP::OpCode::Log, val, hlslOP, Builder); return Builder.CreateFMul(ln2Const, log); } Value *TranslateLog10(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; IRBuilder<> Builder(CI); Type *Ty = CI->getType(); Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Constant *log2_10Const = ConstantFP::get(Ty->getScalarType(), M_LN2 / M_LN10); if (Ty != Ty->getScalarType()) { log2_10Const = ConstantVector::getSplat(Ty->getVectorNumElements(), log2_10Const); } Value *log = TrivialDxilUnaryOperation(OP::OpCode::Log, val, hlslOP, Builder); return Builder.CreateFMul(log2_10Const, log); } Value *TranslateFMod(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *div = Builder.CreateFDiv(src0, src1); Value *negDiv = Builder.CreateFNeg(div); Value *ge = Builder.CreateFCmpOGE(div, negDiv); Value *absDiv = TrivialDxilUnaryOperation(OP::OpCode::FAbs, div, hlslOP, 
Builder); Value *frc = TrivialDxilUnaryOperation(OP::OpCode::Frc, absDiv, hlslOP, Builder); Value *negFrc = Builder.CreateFNeg(frc); Value *realFrc = Builder.CreateSelect(ge, frc, negFrc); return Builder.CreateFMul(realFrc, src1); } Value *TranslateFUIBinary(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { bool isFloat = CI->getType()->getScalarType()->isFloatingPointTy(); if (isFloat) { switch (IOP) { case IntrinsicOp::IOP_max: opcode = OP::OpCode::FMax; break; case IntrinsicOp::IOP_min: default: DXASSERT_NOMSG(IOP == IntrinsicOp::IOP_min); opcode = OP::OpCode::FMin; break; } } return TrivialBinaryOperation(CI, IOP, opcode, helper, pObjHelper, Translated); } Value *TranslateFUITrinary(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { bool isFloat = CI->getType()->getScalarType()->isFloatingPointTy(); if (isFloat) { switch (IOP) { case IntrinsicOp::IOP_mad: default: DXASSERT_NOMSG(IOP == IntrinsicOp::IOP_mad); opcode = OP::OpCode::FMad; break; } } return TrivialTrinaryOperation(CI, IOP, opcode, helper, pObjHelper, Translated); } Value *TranslateFrexp(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *expPtr = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Type *i32Ty = Type::getInt32Ty(CI->getContext()); Constant *exponentMaskConst = ConstantInt::get(i32Ty, 0x7f800000); Constant *mantisaMaskConst = ConstantInt::get(i32Ty, 0x007fffff); Constant *exponentShiftConst = ConstantInt::get(i32Ty, 23); Constant *mantisaOrConst = ConstantInt::get(i32Ty, 0x3f000000); Constant *exponentBiasConst = ConstantInt::get(i32Ty, -(int)0x3f000000); Constant *zeroVal = hlslOP->GetFloatConst(0); // int iVal = asint(val); Type *dstTy = i32Ty; Type *Ty = val->getType(); if (Ty->isVectorTy()) { unsigned vecSize = Ty->getVectorNumElements(); dstTy = VectorType::get(i32Ty, vecSize); exponentMaskConst = ConstantVector::getSplat(vecSize, exponentMaskConst); mantisaMaskConst = ConstantVector::getSplat(vecSize, mantisaMaskConst); exponentShiftConst = ConstantVector::getSplat(vecSize, exponentShiftConst); mantisaOrConst = ConstantVector::getSplat(vecSize, mantisaOrConst); exponentBiasConst = ConstantVector::getSplat(vecSize, exponentBiasConst); zeroVal = ConstantVector::getSplat(vecSize, zeroVal); } // bool ne = val != 0; Value *notZero = Builder.CreateFCmpUNE(val, zeroVal); notZero = Builder.CreateSExt(notZero, dstTy); Value *intVal = Builder.CreateBitCast(val, dstTy); // temp = intVal & exponentMask; Value *temp = Builder.CreateAnd(intVal, exponentMaskConst); // temp = temp + exponentBias; temp = Builder.CreateAdd(temp, exponentBiasConst); // temp = temp & ne; temp = Builder.CreateAnd(temp, notZero); // temp = temp >> exponentShift; temp = Builder.CreateAShr(temp, exponentShiftConst); // exp = float(temp); Value *exp = Builder.CreateSIToFP(temp, Ty); Builder.CreateStore(exp, expPtr); // temp = iVal & mantisaMask; temp = Builder.CreateAnd(intVal, mantisaMaskConst); // temp = temp | mantisaOr; temp = Builder.CreateOr(temp, mantisaOrConst); // mantisa = temp & ne; Value *mantisa = Builder.CreateAnd(temp, notZero); return Builder.CreateBitCast(mantisa, Ty); } Value *TranslateLdExp(CallInst *CI, IntrinsicOp IOP, 
OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Value *exp = TrivialDxilUnaryOperation(OP::OpCode::Exp, src1, hlslOP, Builder); return Builder.CreateFMul(exp, src0); } Value *TranslateFWidth(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); Value *ddx = TrivialDxilUnaryOperation(OP::OpCode::DerivCoarseX, src, hlslOP, Builder); Value *absDdx = TrivialDxilUnaryOperation(OP::OpCode::FAbs, ddx, hlslOP, Builder); Value *ddy = TrivialDxilUnaryOperation(OP::OpCode::DerivCoarseY, src, hlslOP, Builder); Value *absDdy = TrivialDxilUnaryOperation(OP::OpCode::FAbs, ddy, hlslOP, Builder); return Builder.CreateFAdd(absDdx, absDdy); } Value *TranslateLerp(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { // x + s(y-x) Value *x = CI->getArgOperand(HLOperandIndex::kLerpOpXIdx); Value *y = CI->getArgOperand(HLOperandIndex::kLerpOpYIdx); IRBuilder<> Builder(CI); Value *ySubx = Builder.CreateFSub(y, x); Value *s = CI->getArgOperand(HLOperandIndex::kLerpOpSIdx); Value *sMulSub = Builder.CreateFMul(s, ySubx); return Builder.CreateFAdd(x, sMulSub); } Value *TrivialDotOperation(OP::OpCode opcode, Value *src0, Value *src1, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *Ty = src0->getType()->getScalarType(); Function *dxilFunc = hlslOP->GetOpFunc(opcode, Ty); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); SmallVector<Value *, 9> args; args.emplace_back(opArg); unsigned vecSize = src0->getType()->getVectorNumElements(); for (unsigned i = 0; i < vecSize; i++) args.emplace_back(Builder.CreateExtractElement(src0, i)); for (unsigned i = 0; i < vecSize; i++) args.emplace_back(Builder.CreateExtractElement(src1, i)); Value *dotOP = Builder.CreateCall(dxilFunc, args); return dotOP; } Value *TranslateIDot(Value *arg0, Value *arg1, unsigned vecSize, hlsl::OP *hlslOP, IRBuilder<> &Builder, bool Unsigned = false) { auto madOpCode = Unsigned ? 
DXIL::OpCode::UMad : DXIL::OpCode::IMad; Value *Elt0 = Builder.CreateExtractElement(arg0, (uint64_t)0); Value *Elt1 = Builder.CreateExtractElement(arg1, (uint64_t)0); Value *Result = Builder.CreateMul(Elt0, Elt1); for (unsigned iVecElt = 1; iVecElt < vecSize; ++iVecElt) { Elt0 = Builder.CreateExtractElement(arg0, iVecElt); Elt1 = Builder.CreateExtractElement(arg1, iVecElt); Result = TrivialDxilTrinaryOperation(madOpCode, Elt0, Elt1, Result, hlslOP, Builder); } return Result; } Value *TranslateFDot(Value *arg0, Value *arg1, unsigned vecSize, hlsl::OP *hlslOP, IRBuilder<> &Builder) { switch (vecSize) { case 2: return TrivialDotOperation(OP::OpCode::Dot2, arg0, arg1, hlslOP, Builder); break; case 3: return TrivialDotOperation(OP::OpCode::Dot3, arg0, arg1, hlslOP, Builder); break; case 4: return TrivialDotOperation(OP::OpCode::Dot4, arg0, arg1, hlslOP, Builder); break; default: DXASSERT(vecSize == 1, "wrong vector size"); { Value *vecMul = Builder.CreateFMul(arg0, arg1); return Builder.CreateExtractElement(vecMul, (uint64_t)0); } break; } } Value *TranslateDot(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *arg0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Type *Ty = arg0->getType(); unsigned vecSize = Ty->getVectorNumElements(); Value *arg1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); if (Ty->getScalarType()->isFloatingPointTy()) { return TranslateFDot(arg0, arg1, vecSize, hlslOP, Builder); } else { return TranslateIDot(arg0, arg1, vecSize, hlslOP, Builder); } } Value *TranslateNormalize(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *Ty = CI->getType(); Value *op = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); VectorType *VT = cast<VectorType>(Ty); unsigned vecSize = VT->getNumElements(); IRBuilder<> Builder(CI); Value *dot = TranslateFDot(op, op, vecSize, hlslOP, Builder); DXIL::OpCode rsqrtOp = DXIL::OpCode::Rsqrt; Function *dxilRsqrt = hlslOP->GetOpFunc(rsqrtOp, VT->getElementType()); Value *rsqrt = Builder.CreateCall( dxilRsqrt, {hlslOP->GetI32Const((unsigned)rsqrtOp), dot}, hlslOP->GetOpCodeName(rsqrtOp)); Value *vecRsqrt = UndefValue::get(VT); for (unsigned i = 0; i < VT->getNumElements(); i++) vecRsqrt = Builder.CreateInsertElement(vecRsqrt, rsqrt, i); return Builder.CreateFMul(op, vecRsqrt); } Value *TranslateReflect(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; // v = i - 2 * n * dot(i, n). IRBuilder<> Builder(CI); Value *i = CI->getArgOperand(HLOperandIndex::kReflectOpIIdx); Value *n = CI->getArgOperand(HLOperandIndex::kReflectOpNIdx); VectorType *VT = cast<VectorType>(i->getType()); unsigned vecSize = VT->getNumElements(); Value *dot = TranslateFDot(i, n, vecSize, hlslOP, Builder); // 2 * dot (i, n). dot = Builder.CreateFMul(ConstantFP::get(dot->getType(), 2.0), dot); // 2 * n * dot(i, n). Value *vecDot = Builder.CreateVectorSplat(vecSize, dot); Value *nMulDot = Builder.CreateFMul(vecDot, n); // i - 2 * n * dot(i, n). 
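  // Worked example (illustrative, not part of the generated code): with
  // i = (1, -1, 0) and unit normal n = (0, 1, 0), dot(i, n) = -1, so
  // v = i - 2*n*dot(i, n) = (1, -1, 0) + (0, 2, 0) = (1, 1, 0); the incident
  // vector is mirrored about the plane perpendicular to n. As with the HLSL
  // intrinsic, n is assumed to be normalized; nothing here renormalizes it.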
  return Builder.CreateFSub(i, nMulDot);
}

Value *TranslateRefract(CallInst *CI, IntrinsicOp IOP, OP::OpCode op,
                        HLOperationLowerHelper &helper,
                        HLObjectOperationLowerHelper *pObjHelper,
                        bool &Translated) {
  hlsl::OP *hlslOP = &helper.hlslOP;
  // d = dot(i, n);
  // t = 1 - eta * eta * ( 1 - d*d);
  // cond = t >= 0;
  // r = eta * i - (eta * d + sqrt(t)) * n;
  // return cond ? r : 0;
  IRBuilder<> Builder(CI);
  Value *i = CI->getArgOperand(HLOperandIndex::kRefractOpIIdx);
  Value *n = CI->getArgOperand(HLOperandIndex::kRefractOpNIdx);
  Value *eta = CI->getArgOperand(HLOperandIndex::kRefractOpEtaIdx);
  VectorType *VT = cast<VectorType>(i->getType());
  unsigned vecSize = VT->getNumElements();
  Value *dot = TranslateFDot(i, n, vecSize, hlslOP, Builder);
  // eta * eta;
  Value *eta2 = Builder.CreateFMul(eta, eta);
  // d*d;
  Value *dot2 = Builder.CreateFMul(dot, dot);
  Constant *one = ConstantFP::get(eta->getType(), 1);
  Constant *zero = ConstantFP::get(eta->getType(), 0);
  // 1 - d*d;
  dot2 = Builder.CreateFSub(one, dot2);
  // eta * eta * (1 - d*d);
  eta2 = Builder.CreateFMul(dot2, eta2);
  // t = 1 - eta * eta * (1 - d*d);
  Value *t = Builder.CreateFSub(one, eta2);
  // cond = t >= 0;
  Value *cond = Builder.CreateFCmpOGE(t, zero);
  // eta * i;
  Value *vecEta = UndefValue::get(VT);
  for (unsigned elt = 0; elt < vecSize; elt++)
    vecEta = Builder.CreateInsertElement(vecEta, eta, elt);
  Value *etaMulI = Builder.CreateFMul(i, vecEta);
  // sqrt(t);
  Value *sqrt = TrivialDxilUnaryOperation(OP::OpCode::Sqrt, t, hlslOP, Builder);
  // eta * d;
  Value *etaMulD = Builder.CreateFMul(eta, dot);
  // eta * d + sqrt(t);
  Value *etaSqrt = Builder.CreateFAdd(etaMulD, sqrt);
  // (eta * d + sqrt(t)) * n;
  Value *vecEtaSqrt = Builder.CreateVectorSplat(vecSize, etaSqrt);
  Value *r = Builder.CreateFMul(vecEtaSqrt, n);
  // r = eta * i - (eta * d + sqrt(t)) * n;
  r = Builder.CreateFSub(etaMulI, r);
  Value *refract =
      Builder.CreateSelect(cond, r, ConstantVector::getSplat(vecSize, zero));
  return refract;
}

Value *TranslateSmoothStep(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode,
                           HLOperationLowerHelper &helper,
                           HLObjectOperationLowerHelper *pObjHelper,
                           bool &Translated) {
  hlsl::OP *hlslOP = &helper.hlslOP;
  // s = saturate((x-min)/(max-min)).
  IRBuilder<> Builder(CI);
  Value *minVal = CI->getArgOperand(HLOperandIndex::kSmoothStepOpMinIdx);
  Value *maxVal = CI->getArgOperand(HLOperandIndex::kSmoothStepOpMaxIdx);
  Value *maxSubMin = Builder.CreateFSub(maxVal, minVal);
  Value *x = CI->getArgOperand(HLOperandIndex::kSmoothStepOpXIdx);
  Value *xSubMin = Builder.CreateFSub(x, minVal);
  Value *satVal = Builder.CreateFDiv(xSubMin, maxSubMin);
  Value *s = TrivialDxilUnaryOperation(DXIL::OpCode::Saturate, satVal, hlslOP,
                                       Builder);
  // return s * s * (3 - 2*s).
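  // The cubic s*s*(3 - 2*s) is the standard Hermite smoothstep polynomial:
  // it evaluates to 0 at s = 0 and 1 at s = 1 with zero slope at both
  // endpoints. For example, s = 0.5 yields 0.5 * 0.5 * (3 - 1) = 0.5.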
Constant *c2 = ConstantFP::get(CI->getType(), 2); Constant *c3 = ConstantFP::get(CI->getType(), 3); Value *sMul2 = Builder.CreateFMul(s, c2); Value *result = Builder.CreateFSub(c3, sMul2); result = Builder.CreateFMul(s, result); result = Builder.CreateFMul(s, result); return result; } Value *TranslateMSad4(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *ref = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *src = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *accum = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); Type *Ty = CI->getType(); IRBuilder<> Builder(CI); Value *vecRef = UndefValue::get(Ty); for (unsigned i = 0; i < 4; i++) vecRef = Builder.CreateInsertElement(vecRef, ref, i); Value *srcX = Builder.CreateExtractElement(src, (uint64_t)0); Value *srcY = Builder.CreateExtractElement(src, 1); Value *byteSrc = UndefValue::get(Ty); byteSrc = Builder.CreateInsertElement(byteSrc, srcX, (uint64_t)0); // ushr r0.yzw, srcX, l(0, 8, 16, 24) // bfi r1.yzw, l(0, 8, 16, 24), l(0, 24, 16, 8), srcX, r0.yyzw Value *bfiOpArg = hlslOP->GetU32Const(static_cast<unsigned>(DXIL::OpCode::Bfi)); Value *imm8 = hlslOP->GetU32Const(8); Value *imm16 = hlslOP->GetU32Const(16); Value *imm24 = hlslOP->GetU32Const(24); Ty = ref->getType(); // Get x[31:8]. Value *srcXShift = Builder.CreateLShr(srcX, imm8); // y[0~7] x[31:8]. Value *byteSrcElt = TrivialDxilOperation( DXIL::OpCode::Bfi, {bfiOpArg, imm8, imm24, srcY, srcXShift}, Ty, Ty, hlslOP, Builder); byteSrc = Builder.CreateInsertElement(byteSrc, byteSrcElt, 1); // Get x[31:16]. srcXShift = Builder.CreateLShr(srcXShift, imm8); // y[0~15] x[31:16]. byteSrcElt = TrivialDxilOperation(DXIL::OpCode::Bfi, {bfiOpArg, imm16, imm16, srcY, srcXShift}, Ty, Ty, hlslOP, Builder); byteSrc = Builder.CreateInsertElement(byteSrc, byteSrcElt, 2); // Get x[31:24]. srcXShift = Builder.CreateLShr(srcXShift, imm8); // y[0~23] x[31:24]. byteSrcElt = TrivialDxilOperation(DXIL::OpCode::Bfi, {bfiOpArg, imm24, imm8, srcY, srcXShift}, Ty, Ty, hlslOP, Builder); byteSrc = Builder.CreateInsertElement(byteSrc, byteSrcElt, 3); // Msad on vecref and byteSrc. return TrivialDxilTrinaryOperation(DXIL::OpCode::Msad, vecRef, byteSrc, accum, hlslOP, Builder); } Value *TranslateRCP(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Type *Ty = CI->getType(); Value *op = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); IRBuilder<> Builder(CI); Constant *one = ConstantFP::get(Ty->getScalarType(), 1.0); if (Ty != Ty->getScalarType()) { one = ConstantVector::getSplat(Ty->getVectorNumElements(), one); } return Builder.CreateFDiv(one, op); } Value *TranslateSign(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = val->getType(); bool IsInt = Ty->getScalarType()->isIntegerTy(); IRBuilder<> Builder(CI); Constant *zero = Constant::getNullValue(Ty); Value *zeroLtVal = IsInt ? Builder.CreateICmpSLT(zero, val) : Builder.CreateFCmpOLT(zero, val); Value *valLtZero = IsInt ? 
Builder.CreateICmpSLT(val, zero) : Builder.CreateFCmpOLT(val, zero); zeroLtVal = Builder.CreateZExt(zeroLtVal, CI->getType()); valLtZero = Builder.CreateZExt(valLtZero, CI->getType()); return Builder.CreateSub(zeroLtVal, valLtZero); } Value *TranslateUSign(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *Ty = val->getType(); IRBuilder<> Builder(CI); Constant *zero = Constant::getNullValue(Ty); Value *nonZero = Builder.CreateICmpNE(val, zero); return Builder.CreateZExt(nonZero, CI->getType()); } Value *TranslateStep(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *edge = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Type *Ty = CI->getType(); IRBuilder<> Builder(CI); Constant *one = ConstantFP::get(Ty->getScalarType(), 1.0); Constant *zero = ConstantFP::get(Ty->getScalarType(), 0); Value *cond = Builder.CreateFCmpOLT(x, edge); if (Ty != Ty->getScalarType()) { one = ConstantVector::getSplat(Ty->getVectorNumElements(), one); zero = ConstantVector::getSplat(Ty->getVectorNumElements(), zero); } return Builder.CreateSelect(cond, zero, one); } Value *TranslatePow(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *y = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); bool isFXCCompatMode = CI->getModule()->GetHLModule().GetHLOptions().bFXCCompatMode; IRBuilder<> Builder(CI); return TranslatePowImpl(hlslOP, Builder, x, y, isFXCCompatMode); } Value *TranslatePrintf(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Translated = false; dxilutil::EmitErrorOnInstruction(CI, "use of unsupported identifier 'printf'"); return nullptr; } Value *TranslateFaceforward(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Type *Ty = CI->getType(); Value *n = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *i = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *ng = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); IRBuilder<> Builder(CI); unsigned vecSize = Ty->getVectorNumElements(); // -n x sign(dot(i, ng)). 
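  // Rather than materializing sign() and a multiply, the lowering below
  // selects between n and -n directly: n is returned when dot(i, ng) < 0 and
  // -n otherwise. (A literal -n * sign(dot) expansion would produce 0 when
  // the dot product is exactly zero; the select form returns -n there.)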
Value *dotOp = TranslateFDot(i, ng, vecSize, hlslOP, Builder); Constant *zero = ConstantFP::get(Ty->getScalarType(), 0); Value *dotLtZero = Builder.CreateFCmpOLT(dotOp, zero); Value *negN = Builder.CreateFNeg(n); Value *faceforward = Builder.CreateSelect(dotLtZero, n, negN); return faceforward; } Value *TrivialSetMeshOutputCounts(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *src1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); IRBuilder<> Builder(CI); Constant *opArg = hlslOP->GetU32Const((unsigned)op); Value *args[] = {opArg, src0, src1}; Function *dxilFunc = hlslOP->GetOpFunc(op, Type::getVoidTy(CI->getContext())); Builder.CreateCall(dxilFunc, args); return nullptr; } Value *TrivialDispatchMesh(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kDispatchMeshOpThreadX); Value *src1 = CI->getArgOperand(HLOperandIndex::kDispatchMeshOpThreadY); Value *src2 = CI->getArgOperand(HLOperandIndex::kDispatchMeshOpThreadZ); Value *src3 = CI->getArgOperand(HLOperandIndex::kDispatchMeshOpPayload); IRBuilder<> Builder(CI); Constant *opArg = hlslOP->GetU32Const((unsigned)op); Value *args[] = {opArg, src0, src1, src2, src3}; Function *dxilFunc = hlslOP->GetOpFunc(op, src3->getType()); Builder.CreateCall(dxilFunc, args); return nullptr; } } // namespace // MOP intrinsics namespace { Value *TranslateGetSamplePosition(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); Value *sampleIdx = CI->getArgOperand(HLOperandIndex::kGetSamplePositionSampleIdxOpIndex); OP::OpCode opcode = OP::OpCode::Texture2DMSGetSamplePosition; llvm::Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Function *dxilFunc = hlslOP->GetOpFunc(opcode, Type::getVoidTy(CI->getContext())); Value *args[] = {opArg, handle, sampleIdx}; Value *samplePos = Builder.CreateCall(dxilFunc, args); Value *result = UndefValue::get(CI->getType()); Value *samplePosX = Builder.CreateExtractValue(samplePos, 0); Value *samplePosY = Builder.CreateExtractValue(samplePos, 1); result = Builder.CreateInsertElement(result, samplePosX, (uint64_t)0); result = Builder.CreateInsertElement(result, samplePosY, 1); return result; } Value *TranslateGetDimensions(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); DxilResource::Kind RK = pObjHelper->GetRK(handle); IRBuilder<> Builder(CI); OP::OpCode opcode = OP::OpCode::GetDimensions; llvm::Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Function *dxilFunc = hlslOP->GetOpFunc(opcode, Type::getVoidTy(CI->getContext())); Type *i32Ty = Type::getInt32Ty(CI->getContext()); Value *mipLevel = UndefValue::get(i32Ty); unsigned widthOpIdx = HLOperandIndex::kGetDimensionsMipWidthOpIndex; switch (RK) { case DxilResource::Kind::Texture1D: case DxilResource::Kind::Texture1DArray: case DxilResource::Kind::Texture2D: case DxilResource::Kind::Texture2DArray: 
case DxilResource::Kind::TextureCube: case DxilResource::Kind::TextureCubeArray: case DxilResource::Kind::Texture3D: { Value *opMipLevel = CI->getArgOperand(HLOperandIndex::kGetDimensionsMipLevelOpIndex); // mipLevel is in parameter, should not be pointer. if (!opMipLevel->getType()->isPointerTy()) mipLevel = opMipLevel; else { // No mip level. widthOpIdx = HLOperandIndex::kGetDimensionsNoMipWidthOpIndex; mipLevel = ConstantInt::get(i32Ty, 0); } } break; default: widthOpIdx = HLOperandIndex::kGetDimensionsNoMipWidthOpIndex; break; } Value *args[] = {opArg, handle, mipLevel}; Value *dims = Builder.CreateCall(dxilFunc, args); unsigned dimensionIdx = 0; Value *width = Builder.CreateExtractValue(dims, dimensionIdx++); Value *widthPtr = CI->getArgOperand(widthOpIdx); if (widthPtr->getType()->getPointerElementType()->isFloatingPointTy()) width = Builder.CreateSIToFP(width, widthPtr->getType()->getPointerElementType()); Builder.CreateStore(width, widthPtr); if (DXIL::IsStructuredBuffer(RK)) { // Set stride. Value *stridePtr = CI->getArgOperand(widthOpIdx + 1); const DataLayout &DL = helper.dataLayout; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Type *bufTy = pObjHelper->GetResourceType(handle); Type *bufRetTy = bufTy->getStructElementType(0); unsigned stride = DL.getTypeAllocSize(bufRetTy); Builder.CreateStore(hlslOP->GetU32Const(stride), stridePtr); } else { if (widthOpIdx == HLOperandIndex::kGetDimensionsMipWidthOpIndex || // Samples is in w channel too. RK == DXIL::ResourceKind::Texture2DMS) { // Has mip. for (unsigned argIdx = widthOpIdx + 1; argIdx < CI->getNumArgOperands() - 1; argIdx++) { Value *dim = Builder.CreateExtractValue(dims, dimensionIdx++); Value *ptr = CI->getArgOperand(argIdx); if (ptr->getType()->getPointerElementType()->isFloatingPointTy()) dim = Builder.CreateSIToFP(dim, ptr->getType()->getPointerElementType()); Builder.CreateStore(dim, ptr); } // NumOfLevel is in w channel. dimensionIdx = 3; Value *dim = Builder.CreateExtractValue(dims, dimensionIdx); Value *ptr = CI->getArgOperand(CI->getNumArgOperands() - 1); if (ptr->getType()->getPointerElementType()->isFloatingPointTy()) dim = Builder.CreateSIToFP(dim, ptr->getType()->getPointerElementType()); Builder.CreateStore(dim, ptr); } else { for (unsigned argIdx = widthOpIdx + 1; argIdx < CI->getNumArgOperands(); argIdx++) { Value *dim = Builder.CreateExtractValue(dims, dimensionIdx++); Value *ptr = CI->getArgOperand(argIdx); if (ptr->getType()->getPointerElementType()->isFloatingPointTy()) dim = Builder.CreateSIToFP(dim, ptr->getType()->getPointerElementType()); Builder.CreateStore(dim, ptr); } } } return nullptr; } Value *GenerateUpdateCounter(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); pObjHelper->MarkHasCounter(handle, helper.i8Ty); bool bInc = IOP == IntrinsicOp::MOP_IncrementCounter; IRBuilder<> Builder(CI); OP::OpCode OpCode = OP::OpCode::BufferUpdateCounter; Value *OpCodeArg = hlslOP->GetU32Const((unsigned)OpCode); Value *IncVal = hlslOP->GetI8Const(bInc ? 1 : -1); // Create BufferUpdateCounter call. Value *Args[] = {OpCodeArg, handle, IncVal}; Function *F = hlslOP->GetOpFunc(OpCode, Type::getVoidTy(handle->getContext())); return Builder.CreateCall(F, Args); } static Value *ScalarizeResRet(Type *RetTy, Value *ResRet, IRBuilder<> &Builder) { // Extract value part. 
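  // DXIL load/sample operations return a ResRet aggregate holding four
  // component values plus a status field; this helper repacks the leading
  // components into the scalar or vector type the HL call site expects.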
Value *retVal = llvm::UndefValue::get(RetTy); if (RetTy->isVectorTy()) { for (unsigned i = 0; i < RetTy->getVectorNumElements(); i++) { Value *retComp = Builder.CreateExtractValue(ResRet, i); retVal = Builder.CreateInsertElement(retVal, retComp, i); } } else { retVal = Builder.CreateExtractValue(ResRet, 0); } return retVal; } static Value *ScalarizeElements(Type *RetTy, ArrayRef<Value *> Elts, IRBuilder<> &Builder) { // Extract value part. Value *retVal = llvm::UndefValue::get(RetTy); if (RetTy->isVectorTy()) { unsigned vecSize = RetTy->getVectorNumElements(); DXASSERT(vecSize <= Elts.size(), "vector size mismatch"); for (unsigned i = 0; i < vecSize; i++) { Value *retComp = Elts[i]; retVal = Builder.CreateInsertElement(retVal, retComp, i); } } else { retVal = Elts[0]; } return retVal; } void UpdateStatus(Value *ResRet, Value *status, IRBuilder<> &Builder, hlsl::OP *hlslOp) { if (status && !isa<UndefValue>(status)) { Value *statusVal = Builder.CreateExtractValue(ResRet, DXIL::kResRetStatusIndex); Value *checkAccessOp = hlslOp->GetI32Const( static_cast<unsigned>(DXIL::OpCode::CheckAccessFullyMapped)); Function *checkAccessFn = hlslOp->GetOpFunc( DXIL::OpCode::CheckAccessFullyMapped, statusVal->getType()); // CheckAccess on status. Value *bStatus = Builder.CreateCall(checkAccessFn, {checkAccessOp, statusVal}); Value *extStatus = Builder.CreateZExt(bStatus, Type::getInt32Ty(status->getContext())); Builder.CreateStore(extStatus, status); } } Value *SplatToVector(Value *Elt, Type *DstTy, IRBuilder<> &Builder) { Value *Result = UndefValue::get(DstTy); for (unsigned i = 0; i < DstTy->getVectorNumElements(); i++) Result = Builder.CreateInsertElement(Result, Elt, i); return Result; } Value *TranslateMul(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *arg0 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *arg1 = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Type *arg0Ty = arg0->getType(); Type *arg1Ty = arg1->getType(); IRBuilder<> Builder(CI); if (arg0Ty->isVectorTy()) { if (arg1Ty->isVectorTy()) { // mul(vector, vector) == dot(vector, vector) unsigned vecSize = arg0Ty->getVectorNumElements(); if (arg0Ty->getScalarType()->isFloatingPointTy()) { return TranslateFDot(arg0, arg1, vecSize, hlslOP, Builder); } else { return TranslateIDot(arg0, arg1, vecSize, hlslOP, Builder, IOP == IntrinsicOp::IOP_umul); } } else { // mul(vector, scalar) == vector * scalar-splat arg1 = SplatToVector(arg1, arg0Ty, Builder); } } else { if (arg1Ty->isVectorTy()) { // mul(scalar, vector) == scalar-splat * vector arg0 = SplatToVector(arg0, arg1Ty, Builder); } // else mul(scalar, scalar) == scalar * scalar; } // create fmul/mul for the pair of vectors or scalars if (arg0Ty->getScalarType()->isFloatingPointTy()) { return Builder.CreateFMul(arg0, arg1); } else { return Builder.CreateMul(arg0, arg1); } } // Sample intrinsics. struct SampleHelper { SampleHelper(CallInst *CI, OP::OpCode op, HLObjectOperationLowerHelper *pObjHelper); OP::OpCode opcode = OP::OpCode::NumOpCodes; DXIL::ResourceKind resourceKind = DXIL::ResourceKind::Invalid; Value *sampledTexHandle = nullptr; Value *texHandle = nullptr; Value *samplerHandle = nullptr; static const unsigned kMaxCoordDimensions = 4; unsigned coordDimensions = 0; Value *coord[kMaxCoordDimensions]; Value *compareValue = nullptr; Value *bias = nullptr; Value *lod = nullptr; // SampleGrad only. 
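  // (Shared with SampleCmpGrad and WriteSamplerFeedbackGrad.) Derivatives
  // are split into scalar components and padded with undef up to
  // kMaxDDXYDimensions, mirroring the treatment of coordinates above.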
static const unsigned kMaxDDXYDimensions = 3; Value *ddx[kMaxDDXYDimensions]; Value *ddy[kMaxDDXYDimensions]; // Optional. static const unsigned kMaxOffsetDimensions = 3; unsigned offsetDimensions = 0; Value *offset[kMaxOffsetDimensions]; Value *clamp = nullptr; Value *status = nullptr; unsigned maxHLOperandRead = 0; Value *ReadHLOperand(CallInst *CI, unsigned opIdx) { if (CI->getNumArgOperands() > opIdx) { maxHLOperandRead = std::max(maxHLOperandRead, opIdx); return CI->getArgOperand(opIdx); } return nullptr; } void TranslateCoord(CallInst *CI, unsigned coordIdx) { Value *coordArg = ReadHLOperand(CI, coordIdx); DXASSERT_NOMSG(coordArg); DXASSERT(coordArg->getType()->getVectorNumElements() == coordDimensions, "otherwise, HL coordinate dimensions mismatch"); IRBuilder<> Builder(CI); for (unsigned i = 0; i < coordDimensions; i++) coord[i] = Builder.CreateExtractElement(coordArg, i); Value *undefF = UndefValue::get(Type::getFloatTy(CI->getContext())); for (unsigned i = coordDimensions; i < kMaxCoordDimensions; i++) coord[i] = undefF; } void TranslateOffset(CallInst *CI, unsigned offsetIdx) { IntegerType *i32Ty = Type::getInt32Ty(CI->getContext()); if (Value *offsetArg = ReadHLOperand(CI, offsetIdx)) { DXASSERT(offsetArg->getType()->getVectorNumElements() == offsetDimensions, "otherwise, HL coordinate dimensions mismatch"); IRBuilder<> Builder(CI); for (unsigned i = 0; i < offsetDimensions; i++) offset[i] = Builder.CreateExtractElement(offsetArg, i); } else { // Use zeros for offsets when not specified, not undef. Value *zero = ConstantInt::get(i32Ty, (uint64_t)0); for (unsigned i = 0; i < offsetDimensions; i++) offset[i] = zero; } // Use undef for components that should not be used for this resource dim. Value *undefI = UndefValue::get(i32Ty); for (unsigned i = offsetDimensions; i < kMaxOffsetDimensions; i++) offset[i] = undefI; } void SetBias(CallInst *CI, unsigned biasIdx) { // Clamp bias for immediate. 
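    // Only a bias that is already a ConstantFP can be range-checked at
    // compile time; it is clamped into [kMinMipLodBias, kMaxMipLodBias].
    // A bias computed at runtime is passed through unchanged.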
bias = ReadHLOperand(CI, biasIdx); DXASSERT_NOMSG(bias); if (ConstantFP *FP = dyn_cast<ConstantFP>(bias)) { float v = FP->getValueAPF().convertToFloat(); if (v > DXIL::kMaxMipLodBias) bias = ConstantFP::get(FP->getType(), DXIL::kMaxMipLodBias); if (v < DXIL::kMinMipLodBias) bias = ConstantFP::get(FP->getType(), DXIL::kMinMipLodBias); } } void SetLOD(CallInst *CI, unsigned lodIdx) { lod = ReadHLOperand(CI, lodIdx); DXASSERT_NOMSG(lod); } void SetCompareValue(CallInst *CI, unsigned cmpIdx) { compareValue = ReadHLOperand(CI, cmpIdx); DXASSERT_NOMSG(compareValue); } void SetClamp(CallInst *CI, unsigned clampIdx) { if ((clamp = ReadHLOperand(CI, clampIdx))) { if (clamp->getType()->isVectorTy()) { IRBuilder<> Builder(CI); clamp = Builder.CreateExtractElement(clamp, (uint64_t)0); } } else clamp = UndefValue::get(Type::getFloatTy(CI->getContext())); } void SetStatus(CallInst *CI, unsigned statusIdx) { status = ReadHLOperand(CI, statusIdx); } void SetDDX(CallInst *CI, unsigned ddxIdx) { SetDDXY(CI, ddx, ReadHLOperand(CI, ddxIdx)); } void SetDDY(CallInst *CI, unsigned ddyIdx) { SetDDXY(CI, ddy, ReadHLOperand(CI, ddyIdx)); } void SetDDXY(CallInst *CI, MutableArrayRef<Value *> ddxy, Value *ddxyArg) { DXASSERT_NOMSG(ddxyArg); IRBuilder<> Builder(CI); unsigned ddxySize = ddxyArg->getType()->getVectorNumElements(); for (unsigned i = 0; i < ddxySize; i++) ddxy[i] = Builder.CreateExtractElement(ddxyArg, i); Value *undefF = UndefValue::get(Type::getFloatTy(CI->getContext())); for (unsigned i = ddxySize; i < kMaxDDXYDimensions; i++) ddxy[i] = undefF; } }; SampleHelper::SampleHelper(CallInst *CI, OP::OpCode op, HLObjectOperationLowerHelper *pObjHelper) : opcode(op) { texHandle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); resourceKind = pObjHelper->GetRK(texHandle); if (resourceKind == DXIL::ResourceKind::Invalid) { opcode = DXIL::OpCode::NumOpCodes; return; } coordDimensions = opcode == DXIL::OpCode::CalculateLOD ? DxilResource::GetNumDimensionsForCalcLOD(resourceKind) : DxilResource::GetNumCoords(resourceKind); offsetDimensions = DxilResource::GetNumOffsets(resourceKind); const bool bFeedbackOp = hlsl::OP::IsDxilOpFeedback(op); sampledTexHandle = bFeedbackOp ? CI->getArgOperand( HLOperandIndex::kWriteSamplerFeedbackSampledArgIndex) : nullptr; const unsigned kSamplerArgIndex = bFeedbackOp ? HLOperandIndex::kWriteSamplerFeedbackSamplerArgIndex : HLOperandIndex::kSampleSamplerArgIndex; samplerHandle = CI->getArgOperand(kSamplerArgIndex); const unsigned kCoordArgIdx = bFeedbackOp ? HLOperandIndex::kWriteSamplerFeedbackCoordArgIndex : HLOperandIndex::kSampleCoordArgIndex; TranslateCoord(CI, kCoordArgIdx); // TextureCube does not support offsets, shifting each subsequent arg index // down by 1 unsigned cube = (resourceKind == DXIL::ResourceKind::TextureCube || resourceKind == DXIL::ResourceKind::TextureCubeArray) ? 1 : 0; switch (op) { case OP::OpCode::Sample: TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleStatusArgIndex - cube); break; case OP::OpCode::SampleLevel: SetLOD(CI, HLOperandIndex::kSampleLLevelArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleLOffsetArgIndex); SetStatus(CI, HLOperandIndex::kSampleLStatusArgIndex - cube); break; case OP::OpCode::SampleBias: SetBias(CI, HLOperandIndex::kSampleBBiasArgIndex); TranslateOffset(CI, cube ? 
HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleBOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleBClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleBStatusArgIndex - cube); break; case OP::OpCode::SampleCmp: SetCompareValue(CI, HLOperandIndex::kSampleCmpCmpValArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleCmpOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleCmpClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleCmpStatusArgIndex - cube); break; case OP::OpCode::SampleCmpBias: SetBias(CI, HLOperandIndex::kSampleCmpBBiasArgIndex); SetCompareValue(CI, HLOperandIndex::kSampleCmpBCmpValArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleCmpBOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleCmpBClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleCmpBStatusArgIndex - cube); break; case OP::OpCode::SampleCmpGrad: SetDDX(CI, HLOperandIndex::kSampleCmpGDDXArgIndex); SetDDY(CI, HLOperandIndex::kSampleCmpGDDYArgIndex); SetCompareValue(CI, HLOperandIndex::kSampleCmpGCmpValArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleCmpGOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleCmpGClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleCmpGStatusArgIndex - cube); break; case OP::OpCode::SampleCmpLevel: SetCompareValue(CI, HLOperandIndex::kSampleCmpCmpValArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleCmpLOffsetArgIndex); SetLOD(CI, HLOperandIndex::kSampleCmpLLevelArgIndex); SetStatus(CI, HLOperandIndex::kSampleCmpStatusArgIndex - cube); break; case OP::OpCode::SampleCmpLevelZero: SetCompareValue(CI, HLOperandIndex::kSampleCmpLZCmpValArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleCmpLZOffsetArgIndex); SetStatus(CI, HLOperandIndex::kSampleCmpLZStatusArgIndex - cube); break; case OP::OpCode::SampleGrad: SetDDX(CI, HLOperandIndex::kSampleGDDXArgIndex); SetDDY(CI, HLOperandIndex::kSampleGDDYArgIndex); TranslateOffset(CI, cube ? HLOperandIndex::kInvalidIdx : HLOperandIndex::kSampleGOffsetArgIndex); SetClamp(CI, HLOperandIndex::kSampleGClampArgIndex - cube); SetStatus(CI, HLOperandIndex::kSampleGStatusArgIndex - cube); break; case OP::OpCode::CalculateLOD: // Only need coord for LOD calculation. 
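    // CalculateLOD takes no offset, bias, clamp, or status arguments, so
    // nothing further is read from the HL call.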
break; case OP::OpCode::WriteSamplerFeedback: SetClamp(CI, HLOperandIndex::kWriteSamplerFeedback_ClampArgIndex); break; case OP::OpCode::WriteSamplerFeedbackBias: SetBias(CI, HLOperandIndex::kWriteSamplerFeedbackBias_BiasArgIndex); SetClamp(CI, HLOperandIndex::kWriteSamplerFeedbackBias_ClampArgIndex); break; case OP::OpCode::WriteSamplerFeedbackGrad: SetDDX(CI, HLOperandIndex::kWriteSamplerFeedbackGrad_DdxArgIndex); SetDDY(CI, HLOperandIndex::kWriteSamplerFeedbackGrad_DdyArgIndex); SetClamp(CI, HLOperandIndex::kWriteSamplerFeedbackGrad_ClampArgIndex); break; case OP::OpCode::WriteSamplerFeedbackLevel: SetLOD(CI, HLOperandIndex::kWriteSamplerFeedbackLevel_LodArgIndex); break; default: DXASSERT(0, "invalid opcode for Sample"); break; } DXASSERT(maxHLOperandRead == CI->getNumArgOperands() - 1, "otherwise, unused HL arguments for Sample op"); } Value *TranslateCalculateLOD(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; SampleHelper sampleHelper(CI, OP::OpCode::CalculateLOD, pObjHelper); if (sampleHelper.opcode == DXIL::OpCode::NumOpCodes) { Translated = false; return nullptr; } bool bClamped = IOP == IntrinsicOp::MOP_CalculateLevelOfDetail; IRBuilder<> Builder(CI); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(OP::OpCode::CalculateLOD)); Value *clamped = hlslOP->GetI1Const(bClamped); Value *args[] = {opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], clamped}; Function *dxilFunc = hlslOP->GetOpFunc(OP::OpCode::CalculateLOD, Type::getFloatTy(opArg->getContext())); Value *LOD = Builder.CreateCall(dxilFunc, args); return LOD; } Value *TranslateCheckAccess(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { // Translate CheckAccess into uint->bool, later optimization should remove it. // Real checkaccess is generated in UpdateStatus. IRBuilder<> Builder(CI); Value *V = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); return Builder.CreateTrunc(V, helper.i1Ty); } void GenerateDxilSample(CallInst *CI, Function *F, ArrayRef<Value *> sampleArgs, Value *status, hlsl::OP *hlslOp) { IRBuilder<> Builder(CI); CallInst *call = Builder.CreateCall(F, sampleArgs); dxilutil::MigrateDebugValue(CI, call); // extract value part Value *retVal = ScalarizeResRet(CI->getType(), call, Builder); // Replace ret val. CI->replaceAllUsesWith(retVal); // get status if (status) { UpdateStatus(call, status, Builder, hlslOp); } } Value *TranslateSample(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; SampleHelper sampleHelper(CI, opcode, pObjHelper); if (sampleHelper.opcode == DXIL::OpCode::NumOpCodes) { Translated = false; return nullptr; } Type *Ty = CI->getType(); Function *F = hlslOP->GetOpFunc(opcode, Ty->getScalarType()); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); switch (opcode) { case OP::OpCode::Sample: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // Clamp. 
sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleLevel: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // LOD. sampleHelper.lod}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleGrad: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // Ddx. sampleHelper.ddx[0], sampleHelper.ddx[1], sampleHelper.ddx[2], // Ddy. sampleHelper.ddy[0], sampleHelper.ddy[1], sampleHelper.ddy[2], // Clamp. sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleBias: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // Bias. sampleHelper.bias, // Clamp. sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleCmpBias: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // CmpVal. sampleHelper.compareValue, // Bias. sampleHelper.bias, // Clamp. sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleCmpGrad: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // CmpVal. sampleHelper.compareValue, // Ddx. sampleHelper.ddx[0], sampleHelper.ddx[1], sampleHelper.ddx[2], // Ddy. sampleHelper.ddy[0], sampleHelper.ddy[1], sampleHelper.ddy[2], // Clamp. sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleCmp: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // CmpVal. sampleHelper.compareValue, // Clamp. sampleHelper.clamp}; GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP); } break; case OP::OpCode::SampleCmpLevel: { Value *sampleArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Offset. sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2], // CmpVal. sampleHelper.compareValue, // LOD. 
      sampleHelper.lod};
    GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP);
  } break;
  case OP::OpCode::SampleCmpLevelZero:
  default: {
    DXASSERT(opcode == OP::OpCode::SampleCmpLevelZero, "invalid sample opcode");
    Value *sampleArgs[] = {
        opArg, sampleHelper.texHandle, sampleHelper.samplerHandle,
        // Coord.
        sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2],
        sampleHelper.coord[3],
        // Offset.
        sampleHelper.offset[0], sampleHelper.offset[1], sampleHelper.offset[2],
        // CmpVal.
        sampleHelper.compareValue};
    GenerateDxilSample(CI, F, sampleArgs, sampleHelper.status, hlslOP);
  } break;
  }
  // CI is replaced in GenerateDxilSample.
  return nullptr;
}

// Gather intrinsics.
struct GatherHelper {
  enum class GatherChannel {
    GatherAll,
    GatherRed,
    GatherGreen,
    GatherBlue,
    GatherAlpha,
  };

  GatherHelper(CallInst *CI, OP::OpCode op,
               HLObjectOperationLowerHelper *pObjHelper,
               GatherHelper::GatherChannel ch);

  OP::OpCode opcode;
  Value *texHandle;
  Value *samplerHandle;
  static const unsigned kMaxCoordDimensions = 4;
  Value *coord[kMaxCoordDimensions];
  unsigned channel;
  Value *special; // For CompareValue, Bias, LOD.
  // Optional.
  static const unsigned kMaxOffsetDimensions = 2;
  Value *offset[kMaxOffsetDimensions];
  // For the overload that sends a different offset for each sample.
  // Only three sample offsets are stored because the regular offset member
  // doubles as the offset for sample 0.
  static const unsigned kSampleOffsetDimensions = 3;
  Value *sampleOffsets[kSampleOffsetDimensions][kMaxOffsetDimensions];
  Value *status;
  bool hasSampleOffsets;
  unsigned maxHLOperandRead = 0;
  Value *ReadHLOperand(CallInst *CI, unsigned opIdx) {
    if (CI->getNumArgOperands() > opIdx) {
      maxHLOperandRead = std::max(maxHLOperandRead, opIdx);
      return CI->getArgOperand(opIdx);
    }
    return nullptr;
  }
  void TranslateCoord(CallInst *CI, unsigned coordIdx,
                      unsigned coordDimensions) {
    Value *coordArg = ReadHLOperand(CI, coordIdx);
    DXASSERT_NOMSG(coordArg);
    DXASSERT(coordArg->getType()->getVectorNumElements() == coordDimensions,
             "otherwise, HL coordinate dimensions mismatch");
    IRBuilder<> Builder(CI);
    for (unsigned i = 0; i < coordDimensions; i++)
      coord[i] = Builder.CreateExtractElement(coordArg, i);
    Value *undefF = UndefValue::get(Type::getFloatTy(CI->getContext()));
    for (unsigned i = coordDimensions; i < kMaxCoordDimensions; i++)
      coord[i] = undefF;
  }
  void SetStatus(CallInst *CI, unsigned statusIdx) {
    status = ReadHLOperand(CI, statusIdx);
  }
  void TranslateOffset(CallInst *CI, unsigned offsetIdx,
                       unsigned offsetDimensions) {
    IntegerType *i32Ty = Type::getInt32Ty(CI->getContext());
    if (Value *offsetArg = ReadHLOperand(CI, offsetIdx)) {
      DXASSERT(offsetArg->getType()->getVectorNumElements() ==
                   offsetDimensions,
               "otherwise, HL coordinate dimensions mismatch");
      IRBuilder<> Builder(CI);
      for (unsigned i = 0; i < offsetDimensions; i++)
        offset[i] = Builder.CreateExtractElement(offsetArg, i);
    } else {
      // Use zeros for offsets when not specified, not undef.
      Value *zero = ConstantInt::get(i32Ty, (uint64_t)0);
      for (unsigned i = 0; i < offsetDimensions; i++)
        offset[i] = zero;
    }
    // Use undef for components that should not be used for this resource dim.
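    // Note the asymmetry: offsets the caller omitted default to zero, since
    // an offset is arithmetically applied to the coordinate and must be a
    // real value, whereas components beyond the resource's offset
    // dimensionality (filled in below) are undef so later passes may ignore
    // them.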
    Value *undefI = UndefValue::get(i32Ty);
    for (unsigned i = offsetDimensions; i < kMaxOffsetDimensions; i++)
      offset[i] = undefI;
  }
  void TranslateSampleOffset(CallInst *CI, unsigned offsetIdx,
                             unsigned offsetDimensions) {
    Value *undefI = UndefValue::get(Type::getInt32Ty(CI->getContext()));
    if (CI->getNumArgOperands() >= (offsetIdx + kSampleOffsetDimensions)) {
      hasSampleOffsets = true;
      IRBuilder<> Builder(CI);
      for (unsigned ch = 0; ch < kSampleOffsetDimensions; ch++) {
        Value *offsetArg = ReadHLOperand(CI, offsetIdx + ch);
        for (unsigned i = 0; i < offsetDimensions; i++)
          sampleOffsets[ch][i] = Builder.CreateExtractElement(offsetArg, i);
        for (unsigned i = offsetDimensions; i < kMaxOffsetDimensions; i++)
          sampleOffsets[ch][i] = undefI;
      }
    }
  }
  // Update the offset args for gather with the sample offset at sampleIdx.
  void UpdateOffsetInGatherArgs(MutableArrayRef<Value *> gatherArgs,
                                unsigned sampleIdx) {
    unsigned offsetBase = DXIL::OperandIndex::kTextureGatherOffset0OpIdx;
    for (unsigned i = 0; i < kMaxOffsetDimensions; i++)
      // -1 because the offset for sample 0 is in GatherHelper::offset.
      gatherArgs[offsetBase + i] = sampleOffsets[sampleIdx - 1][i];
  }
};

GatherHelper::GatherHelper(CallInst *CI, OP::OpCode op,
                           HLObjectOperationLowerHelper *pObjHelper,
                           GatherHelper::GatherChannel ch)
    : opcode(op), special(nullptr), hasSampleOffsets(false) {
  switch (ch) {
  case GatherChannel::GatherAll:
    channel = 0;
    break;
  case GatherChannel::GatherRed:
    channel = 0;
    break;
  case GatherChannel::GatherGreen:
    channel = 1;
    break;
  case GatherChannel::GatherBlue:
    channel = 2;
    break;
  case GatherChannel::GatherAlpha:
    channel = 3;
    break;
  }
  IRBuilder<> Builder(CI);
  texHandle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx);
  samplerHandle = CI->getArgOperand(HLOperandIndex::kSampleSamplerArgIndex);
  DXIL::ResourceKind RK = pObjHelper->GetRK(texHandle);
  if (RK == DXIL::ResourceKind::Invalid) {
    opcode = DXIL::OpCode::NumOpCodes;
    return;
  }
  unsigned coordSize = DxilResource::GetNumCoords(RK);
  unsigned offsetSize = DxilResource::GetNumOffsets(RK);
  bool cube = RK == DXIL::ResourceKind::TextureCube ||
              RK == DXIL::ResourceKind::TextureCubeArray;
  const unsigned kCoordArgIdx = HLOperandIndex::kSampleCoordArgIndex;
  TranslateCoord(CI, kCoordArgIdx, coordSize);
  switch (op) {
  case OP::OpCode::TextureGather: {
    unsigned statusIdx;
    if (cube) {
      TranslateOffset(CI, HLOperandIndex::kInvalidIdx, offsetSize);
      statusIdx = HLOperandIndex::kGatherCubeStatusArgIndex;
    } else {
      TranslateOffset(CI, HLOperandIndex::kGatherOffsetArgIndex, offsetSize);
      // GatherAll does not have a sample-offset overload.
      if (ch != GatherChannel::GatherAll)
        TranslateSampleOffset(CI, HLOperandIndex::kGatherSampleOffsetArgIndex,
                              offsetSize);
      statusIdx = hasSampleOffsets
                      ? HLOperandIndex::kGatherStatusWithSampleOffsetArgIndex
                      : HLOperandIndex::kGatherStatusArgIndex;
    }
    SetStatus(CI, statusIdx);
  } break;
  case OP::OpCode::TextureGatherCmp: {
    special = ReadHLOperand(CI, HLOperandIndex::kGatherCmpCmpValArgIndex);
    unsigned statusIdx;
    if (cube) {
      TranslateOffset(CI, HLOperandIndex::kInvalidIdx, offsetSize);
      statusIdx = HLOperandIndex::kGatherCmpCubeStatusArgIndex;
    } else {
      TranslateOffset(CI, HLOperandIndex::kGatherCmpOffsetArgIndex,
                      offsetSize);
      // GatherAll does not have a sample-offset overload.
      if (ch != GatherChannel::GatherAll)
        TranslateSampleOffset(
            CI, HLOperandIndex::kGatherCmpSampleOffsetArgIndex, offsetSize);
      statusIdx = hasSampleOffsets
                      ? HLOperandIndex::kGatherCmpStatusWithSampleOffsetArgIndex
                      : HLOperandIndex::kGatherCmpStatusArgIndex;
    }
    SetStatus(CI, statusIdx);
  } break;
  case OP::OpCode::TextureGatherRaw: {
    unsigned statusIdx;
    TranslateOffset(CI, HLOperandIndex::kGatherOffsetArgIndex, offsetSize);
    // Raw gather has no per-channel or per-sample-offset overloads.
    DXASSERT(ch == GatherChannel::GatherAll,
             "Raw gather must use all channels");
    DXASSERT(!cube, "Raw gather can't be used with cube textures");
    DXASSERT(!hasSampleOffsets, "Raw gather doesn't support individual offsets");
    statusIdx = HLOperandIndex::kGatherStatusArgIndex;
    SetStatus(CI, statusIdx);
  } break;
  default:
    DXASSERT(0, "invalid opcode for Gather");
    break;
  }
  DXASSERT(maxHLOperandRead == CI->getNumArgOperands() - 1,
           "otherwise, unused HL arguments for Gather op");
}

void GenerateDxilGather(CallInst *CI, Function *F,
                        MutableArrayRef<Value *> gatherArgs,
                        GatherHelper &helper, hlsl::OP *hlslOp) {
  IRBuilder<> Builder(CI);
  CallInst *call = Builder.CreateCall(F, gatherArgs);
  dxilutil::MigrateDebugValue(CI, call);
  Value *retVal;
  if (!helper.hasSampleOffsets) {
    // extract value part
    retVal = ScalarizeResRet(CI->getType(), call, Builder);
  } else {
    retVal = UndefValue::get(CI->getType());
    Value *elt = Builder.CreateExtractValue(call, (uint64_t)0);
    retVal = Builder.CreateInsertElement(retVal, elt, (uint64_t)0);

    helper.UpdateOffsetInGatherArgs(gatherArgs, /*sampleIdx*/ 1);
    CallInst *callY = Builder.CreateCall(F, gatherArgs);
    elt = Builder.CreateExtractValue(callY, (uint64_t)1);
    retVal = Builder.CreateInsertElement(retVal, elt, 1);

    helper.UpdateOffsetInGatherArgs(gatherArgs, /*sampleIdx*/ 2);
    CallInst *callZ = Builder.CreateCall(F, gatherArgs);
    elt = Builder.CreateExtractValue(callZ, (uint64_t)2);
    retVal = Builder.CreateInsertElement(retVal, elt, 2);

    helper.UpdateOffsetInGatherArgs(gatherArgs, /*sampleIdx*/ 3);
    CallInst *callW = Builder.CreateCall(F, gatherArgs);
    elt = Builder.CreateExtractValue(callW, (uint64_t)3);
    retVal = Builder.CreateInsertElement(retVal, elt, 3);
    // TODO: UpdateStatus for each gather call.
  }
  // Replace ret val.
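  // Only the first call's status is inspected below; the TODO above covers
  // propagating status from the three extra per-sample-offset calls.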
CI->replaceAllUsesWith(retVal); // Get status if (helper.status) { UpdateStatus(call, helper.status, Builder, hlslOp); } } Value *TranslateGather(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; GatherHelper::GatherChannel ch = GatherHelper::GatherChannel::GatherAll; switch (IOP) { case IntrinsicOp::MOP_Gather: case IntrinsicOp::MOP_GatherCmp: case IntrinsicOp::MOP_GatherRaw: ch = GatherHelper::GatherChannel::GatherAll; break; case IntrinsicOp::MOP_GatherRed: case IntrinsicOp::MOP_GatherCmpRed: ch = GatherHelper::GatherChannel::GatherRed; break; case IntrinsicOp::MOP_GatherGreen: case IntrinsicOp::MOP_GatherCmpGreen: ch = GatherHelper::GatherChannel::GatherGreen; break; case IntrinsicOp::MOP_GatherBlue: case IntrinsicOp::MOP_GatherCmpBlue: ch = GatherHelper::GatherChannel::GatherBlue; break; case IntrinsicOp::MOP_GatherAlpha: case IntrinsicOp::MOP_GatherCmpAlpha: ch = GatherHelper::GatherChannel::GatherAlpha; break; default: DXASSERT(0, "invalid gather intrinsic"); break; } GatherHelper gatherHelper(CI, opcode, pObjHelper, ch); if (gatherHelper.opcode == DXIL::OpCode::NumOpCodes) { Translated = false; return nullptr; } Type *Ty = CI->getType(); Function *F = hlslOP->GetOpFunc(gatherHelper.opcode, Ty->getScalarType()); Constant *opArg = hlslOP->GetU32Const((unsigned)gatherHelper.opcode); Value *channelArg = hlslOP->GetU32Const(gatherHelper.channel); switch (opcode) { case OP::OpCode::TextureGather: { Value *gatherArgs[] = {opArg, gatherHelper.texHandle, gatherHelper.samplerHandle, // Coord. gatherHelper.coord[0], gatherHelper.coord[1], gatherHelper.coord[2], gatherHelper.coord[3], // Offset. gatherHelper.offset[0], gatherHelper.offset[1], // Channel. channelArg}; GenerateDxilGather(CI, F, gatherArgs, gatherHelper, hlslOP); } break; case OP::OpCode::TextureGatherCmp: { Value *gatherArgs[] = {opArg, gatherHelper.texHandle, gatherHelper.samplerHandle, // Coord. gatherHelper.coord[0], gatherHelper.coord[1], gatherHelper.coord[2], gatherHelper.coord[3], // Offset. gatherHelper.offset[0], gatherHelper.offset[1], // Channel. channelArg, // CmpVal. gatherHelper.special}; GenerateDxilGather(CI, F, gatherArgs, gatherHelper, hlslOP); } break; case OP::OpCode::TextureGatherRaw: { Value *gatherArgs[] = {opArg, gatherHelper.texHandle, gatherHelper.samplerHandle, // Coord. gatherHelper.coord[0], gatherHelper.coord[1], gatherHelper.coord[2], gatherHelper.coord[3], // Offset. gatherHelper.offset[0], gatherHelper.offset[1]}; GenerateDxilGather(CI, F, gatherArgs, gatherHelper, hlslOP); break; } default: DXASSERT(0, "invalid opcode for Gather"); break; } // CI is replaced in GenerateDxilGather. return nullptr; } static Value * TranslateWriteSamplerFeedback(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; SampleHelper sampleHelper(CI, opcode, pObjHelper); if (sampleHelper.opcode == DXIL::OpCode::NumOpCodes) { Translated = false; return nullptr; } Type *Ty = CI->getType(); Function *F = hlslOP->GetOpFunc(opcode, Ty->getScalarType()); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); IRBuilder<> Builder(CI); switch (opcode) { case OP::OpCode::WriteSamplerFeedback: { Value *samplerFeedbackArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.sampledTexHandle, sampleHelper.samplerHandle, // Coord. 
sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Clamp. sampleHelper.clamp}; return Builder.CreateCall(F, samplerFeedbackArgs); } break; case OP::OpCode::WriteSamplerFeedbackBias: { Value *samplerFeedbackArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.sampledTexHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Bias. sampleHelper.bias, // Clamp. sampleHelper.clamp}; return Builder.CreateCall(F, samplerFeedbackArgs); } break; case OP::OpCode::WriteSamplerFeedbackGrad: { Value *samplerFeedbackArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.sampledTexHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // Ddx. sampleHelper.ddx[0], sampleHelper.ddx[1], sampleHelper.ddx[2], // Ddy. sampleHelper.ddy[0], sampleHelper.ddy[1], sampleHelper.ddy[2], // Clamp. sampleHelper.clamp}; return Builder.CreateCall(F, samplerFeedbackArgs); } break; case OP::OpCode::WriteSamplerFeedbackLevel: { Value *samplerFeedbackArgs[] = { opArg, sampleHelper.texHandle, sampleHelper.sampledTexHandle, sampleHelper.samplerHandle, // Coord. sampleHelper.coord[0], sampleHelper.coord[1], sampleHelper.coord[2], sampleHelper.coord[3], // LOD. sampleHelper.lod}; return Builder.CreateCall(F, samplerFeedbackArgs); } break; default: DXASSERT(false, "otherwise, unknown SamplerFeedback Op"); break; } return nullptr; } // Load/Store intrinsics. struct ResLoadHelper { ResLoadHelper(CallInst *CI, DxilResource::Kind RK, DxilResourceBase::Class RC, Value *h, IntrinsicOp IOP, bool bForSubscript = false); // For double subscript. ResLoadHelper(Instruction *ldInst, Value *h, Value *idx, Value *mip) : opcode(OP::OpCode::TextureLoad), intrinsicOpCode(IntrinsicOp::Num_Intrinsics), handle(h), retVal(ldInst), addr(idx), offset(nullptr), status(nullptr), mipLevel(mip) {} OP::OpCode opcode; IntrinsicOp intrinsicOpCode; unsigned dxilMajor; unsigned dxilMinor; Value *handle; Value *retVal; Value *addr; Value *offset; Value *status; Value *mipLevel; }; ResLoadHelper::ResLoadHelper(CallInst *CI, DxilResource::Kind RK, DxilResourceBase::Class RC, Value *hdl, IntrinsicOp IOP, bool bForSubscript) : intrinsicOpCode(IOP), handle(hdl), offset(nullptr), status(nullptr) { switch (RK) { case DxilResource::Kind::RawBuffer: case DxilResource::Kind::StructuredBuffer: opcode = OP::OpCode::RawBufferLoad; break; case DxilResource::Kind::TypedBuffer: opcode = OP::OpCode::BufferLoad; break; case DxilResource::Kind::Invalid: DXASSERT(0, "invalid resource kind"); break; default: opcode = OP::OpCode::TextureLoad; break; } retVal = CI; const unsigned kAddrIdx = HLOperandIndex::kBufLoadAddrOpIdx; addr = CI->getArgOperand(kAddrIdx); unsigned argc = CI->getNumArgOperands(); if (opcode == OP::OpCode::TextureLoad) { // mip at last channel unsigned coordSize = DxilResource::GetNumCoords(RK); if (RC == DxilResourceBase::Class::SRV) { if (bForSubscript) { // Use 0 when access by []. mipLevel = IRBuilder<>(CI).getInt32(0); } else { if (coordSize == 1 && !addr->getType()->isVectorTy()) { // Use addr when access by Load. mipLevel = addr; } else { mipLevel = IRBuilder<>(CI).CreateExtractElement(addr, coordSize); } } } else { // Set mip level to undef for UAV. 
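    // (UAV texture loads carry no mip-level argument in HLSL; for Texture2DMS
    // UAVs this slot is repurposed for the sample index below.)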
mipLevel = UndefValue::get(Type::getInt32Ty(addr->getContext())); } if (RC == DxilResourceBase::Class::SRV) { unsigned offsetIdx = HLOperandIndex::kTexLoadOffsetOpIdx; unsigned statusIdx = HLOperandIndex::kTexLoadStatusOpIdx; if (RK == DxilResource::Kind::Texture2DMS || RK == DxilResource::Kind::Texture2DMSArray) { offsetIdx = HLOperandIndex::kTex2DMSLoadOffsetOpIdx; statusIdx = HLOperandIndex::kTex2DMSLoadStatusOpIdx; mipLevel = CI->getArgOperand(HLOperandIndex::kTex2DMSLoadSampleIdxOpIdx); } if (argc > offsetIdx) offset = CI->getArgOperand(offsetIdx); if (argc > statusIdx) status = CI->getArgOperand(statusIdx); } else if (RC == DxilResourceBase::Class::UAV && (RK == DxilResource::Kind::Texture2DMS || RK == DxilResource::Kind::Texture2DMSArray)) { unsigned statusIdx = HLOperandIndex::kTex2DMSLoadStatusOpIdx; mipLevel = CI->getArgOperand(HLOperandIndex::kTex2DMSLoadSampleIdxOpIdx); if (argc > statusIdx) status = CI->getArgOperand(statusIdx); } else { const unsigned kStatusIdx = HLOperandIndex::kRWTexLoadStatusOpIdx; if (argc > kStatusIdx) status = CI->getArgOperand(kStatusIdx); } } else { const unsigned kStatusIdx = HLOperandIndex::kBufLoadStatusOpIdx; if (argc > kStatusIdx) status = CI->getArgOperand(kStatusIdx); } } void TranslateStructBufSubscript(CallInst *CI, Value *handle, Value *status, hlsl::OP *OP, HLResource::Kind RK, const DataLayout &DL); // Create { v0, v1 } from { v0.lo, v0.hi, v1.lo, v1.hi } void Make64bitResultForLoad(Type *EltTy, ArrayRef<Value *> resultElts32, unsigned size, MutableArrayRef<Value *> resultElts, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *i64Ty = Builder.getInt64Ty(); Type *doubleTy = Builder.getDoubleTy(); if (EltTy == doubleTy) { Function *makeDouble = hlslOP->GetOpFunc(DXIL::OpCode::MakeDouble, doubleTy); Value *makeDoubleOpArg = Builder.getInt32((unsigned)DXIL::OpCode::MakeDouble); for (unsigned i = 0; i < size; i++) { Value *lo = resultElts32[2 * i]; Value *hi = resultElts32[2 * i + 1]; Value *V = Builder.CreateCall(makeDouble, {makeDoubleOpArg, lo, hi}); resultElts[i] = V; } } else { for (unsigned i = 0; i < size; i++) { Value *lo = resultElts32[2 * i]; Value *hi = resultElts32[2 * i + 1]; lo = Builder.CreateZExt(lo, i64Ty); hi = Builder.CreateZExt(hi, i64Ty); hi = Builder.CreateShl(hi, 32); resultElts[i] = Builder.CreateOr(lo, hi); } } } static Constant *GetRawBufferMaskForETy(Type *Ty, unsigned NumComponents, hlsl::OP *OP) { unsigned mask = 0; switch (NumComponents) { case 0: break; case 1: mask = DXIL::kCompMask_X; break; case 2: mask = DXIL::kCompMask_X | DXIL::kCompMask_Y; break; case 3: mask = DXIL::kCompMask_X | DXIL::kCompMask_Y | DXIL::kCompMask_Z; break; case 4: mask = DXIL::kCompMask_All; break; default: DXASSERT(false, "Cannot load more than 2 components for 64bit types."); } return OP->GetI8Const(mask); } Value *GenerateRawBufLd(Value *handle, Value *bufIdx, Value *offset, Value *status, Type *EltTy, MutableArrayRef<Value *> resultElts, hlsl::OP *OP, IRBuilder<> &Builder, unsigned NumComponents, Constant *alignment); static Value *TranslateRawBufVecLd(Type *VecEltTy, unsigned VecElemCount, IRBuilder<> &Builder, Value *handle, hlsl::OP *OP, Value *status, Value *bufIdx, Value *baseOffset, const DataLayout &DL, std::vector<Value *> &bufLds, unsigned baseAlign, bool isScalarTy = false); void TranslateLoad(ResLoadHelper &helper, HLResource::Kind RK, IRBuilder<> &Builder, hlsl::OP *OP, const DataLayout &DL) { Type *Ty = helper.retVal->getType(); if (Ty->isPointerTy()) { DXASSERT(!DxilResource::IsAnyTexture(RK), "Textures should not be 
treated as structured buffers."); TranslateStructBufSubscript(cast<CallInst>(helper.retVal), helper.handle, helper.status, OP, RK, DL); return; } OP::OpCode opcode = helper.opcode; Type *i32Ty = Builder.getInt32Ty(); Type *i64Ty = Builder.getInt64Ty(); Type *doubleTy = Builder.getDoubleTy(); Type *EltTy = Ty->getScalarType(); unsigned numComponents = 1; if (Ty->isVectorTy()) { numComponents = Ty->getVectorNumElements(); } if (DXIL::IsStructuredBuffer(RK) || DXIL::IsRawBuffer(RK)) { std::vector<Value *> bufLds; const bool isBool = EltTy->isIntegerTy(1); // Bool are represented as i32 in memory Type *MemReprTy = isBool ? Builder.getInt32Ty() : EltTy; bool isScalarTy = !Ty->isVectorTy(); Value *retValNew = nullptr; if (DXIL::IsStructuredBuffer(RK)) { retValNew = TranslateRawBufVecLd( MemReprTy, numComponents, Builder, helper.handle, OP, helper.status, helper.addr, OP->GetU32Const(0), DL, bufLds, /*baseAlign (in bytes)*/ 8, isScalarTy); } else { retValNew = TranslateRawBufVecLd(MemReprTy, numComponents, Builder, helper.handle, OP, helper.status, nullptr, helper.addr, DL, bufLds, /*baseAlign (in bytes)*/ 4, isScalarTy); } DXASSERT_NOMSG(!bufLds.empty()); dxilutil::MigrateDebugValue(helper.retVal, bufLds.front()); if (isBool) { // Convert result back to register representation. retValNew = Builder.CreateICmpNE( retValNew, Constant::getNullValue(retValNew->getType())); } helper.retVal->replaceAllUsesWith(retValNew); helper.retVal = retValNew; return; } bool isTyped = opcode == OP::OpCode::TextureLoad || RK == DxilResource::Kind::TypedBuffer; bool is64 = EltTy == i64Ty || EltTy == doubleTy; if (is64 && isTyped) { EltTy = i32Ty; } bool isBool = EltTy->isIntegerTy(1); if (isBool) { // Value will be loaded in its memory representation. EltTy = i32Ty; if (Ty->isVectorTy()) Ty = VectorType::get(EltTy, numComponents); } Function *F = OP->GetOpFunc(opcode, EltTy); llvm::Constant *opArg = OP->GetU32Const((unsigned)opcode); llvm::Value *undefI = llvm::UndefValue::get(i32Ty); SmallVector<Value *, 12> loadArgs; loadArgs.emplace_back(opArg); // opcode loadArgs.emplace_back(helper.handle); // resource handle if (opcode == OP::OpCode::TextureLoad) { // set mip level loadArgs.emplace_back(helper.mipLevel); } if (opcode == OP::OpCode::TextureLoad) { // texture coord unsigned coordSize = DxilResource::GetNumCoords(RK); bool isVectorAddr = helper.addr->getType()->isVectorTy(); for (unsigned i = 0; i < 3; i++) { if (i < coordSize) { loadArgs.emplace_back(isVectorAddr ? 
Builder.CreateExtractElement(helper.addr, i) : helper.addr); } else loadArgs.emplace_back(undefI); } } else { if (helper.addr->getType()->isVectorTy()) { Value *scalarOffset = Builder.CreateExtractElement(helper.addr, (uint64_t)0); // TODO: calculate the real address based on opcode loadArgs.emplace_back(scalarOffset); // offset } else { // TODO: calculate the real address based on opcode loadArgs.emplace_back(helper.addr); // offset } } // offset 0 if (opcode == OP::OpCode::TextureLoad) { if (helper.offset && !isa<llvm::UndefValue>(helper.offset)) { unsigned offsetSize = DxilResource::GetNumOffsets(RK); for (unsigned i = 0; i < 3; i++) { if (i < offsetSize) loadArgs.emplace_back(Builder.CreateExtractElement(helper.offset, i)); else loadArgs.emplace_back(undefI); } } else { loadArgs.emplace_back(undefI); loadArgs.emplace_back(undefI); loadArgs.emplace_back(undefI); } } // Offset 1 if (RK == DxilResource::Kind::TypedBuffer) { loadArgs.emplace_back(undefI); } Value *ResRet = Builder.CreateCall(F, loadArgs, OP->GetOpCodeName(opcode)); dxilutil::MigrateDebugValue(helper.retVal, ResRet); Value *retValNew = nullptr; if (!is64 || !isTyped) { retValNew = ScalarizeResRet(Ty, ResRet, Builder); } else { unsigned size = numComponents; DXASSERT(size <= 2, "typed buffer only allow 4 dwords"); EltTy = Ty->getScalarType(); Value *Elts[2]; Make64bitResultForLoad(Ty->getScalarType(), { Builder.CreateExtractValue(ResRet, 0), Builder.CreateExtractValue(ResRet, 1), Builder.CreateExtractValue(ResRet, 2), Builder.CreateExtractValue(ResRet, 3), }, size, Elts, OP, Builder); retValNew = ScalarizeElements(Ty, Elts, Builder); } if (isBool) { // Convert result back to register representation. retValNew = Builder.CreateICmpNE( retValNew, Constant::getNullValue(retValNew->getType())); } // replace helper.retVal->replaceAllUsesWith(retValNew); // Save new ret val. helper.retVal = retValNew; // get status UpdateStatus(ResRet, helper.status, Builder, OP); } Value *TranslateResourceLoad(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); DXIL::ResourceClass RC = pObjHelper->GetRC(handle); DXIL::ResourceKind RK = pObjHelper->GetRK(handle); ResLoadHelper loadHelper(CI, RK, RC, handle, IOP); TranslateLoad(loadHelper, RK, Builder, hlslOP, helper.dataLayout); // CI is replaced in TranslateLoad. 
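// For reference, a typed buffer load ends up as something like
//   %r = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 <op>, %h, %idx, i32 undef)
// followed by one extractvalue per component, plus dx.op.checkAccessFullyMapped
// on the status field when a status argument was supplied.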
return nullptr; } // Split { v0, v1 } to { v0.lo, v0.hi, v1.lo, v1.hi } void Split64bitValForStore(Type *EltTy, ArrayRef<Value *> vals, unsigned size, MutableArrayRef<Value *> vals32, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Type *i32Ty = Builder.getInt32Ty(); Type *doubleTy = Builder.getDoubleTy(); Value *undefI32 = UndefValue::get(i32Ty); if (EltTy == doubleTy) { Function *dToU = hlslOP->GetOpFunc(DXIL::OpCode::SplitDouble, doubleTy); Value *dToUOpArg = Builder.getInt32((unsigned)DXIL::OpCode::SplitDouble); for (unsigned i = 0; i < size; i++) { if (isa<UndefValue>(vals[i])) { vals32[2 * i] = undefI32; vals32[2 * i + 1] = undefI32; } else { Value *retVal = Builder.CreateCall(dToU, {dToUOpArg, vals[i]}); Value *lo = Builder.CreateExtractValue(retVal, 0); Value *hi = Builder.CreateExtractValue(retVal, 1); vals32[2 * i] = lo; vals32[2 * i + 1] = hi; } } } else { for (unsigned i = 0; i < size; i++) { if (isa<UndefValue>(vals[i])) { vals32[2 * i] = undefI32; vals32[2 * i + 1] = undefI32; } else { Value *lo = Builder.CreateTrunc(vals[i], i32Ty); Value *hi = Builder.CreateLShr(vals[i], 32); hi = Builder.CreateTrunc(hi, i32Ty); vals32[2 * i] = lo; vals32[2 * i + 1] = hi; } } } } void TranslateStore(DxilResource::Kind RK, Value *handle, Value *val, Value *offset, IRBuilder<> &Builder, hlsl::OP *OP, Value *sampIdx = nullptr) { Type *Ty = val->getType(); // This function is no longer used for lowering stores to a // structured buffer. DXASSERT_NOMSG(RK != DxilResource::Kind::StructuredBuffer); OP::OpCode opcode = OP::OpCode::NumOpCodes; switch (RK) { case DxilResource::Kind::RawBuffer: case DxilResource::Kind::StructuredBuffer: opcode = OP::OpCode::RawBufferStore; break; case DxilResource::Kind::TypedBuffer: opcode = OP::OpCode::BufferStore; break; case DxilResource::Kind::Invalid: DXASSERT(0, "invalid resource kind"); break; case DxilResource::Kind::Texture2DMS: case DxilResource::Kind::Texture2DMSArray: opcode = OP::OpCode::TextureStoreSample; break; default: opcode = OP::OpCode::TextureStore; break; } bool isTyped = opcode == OP::OpCode::TextureStore || opcode == OP::OpCode::TextureStoreSample || RK == DxilResource::Kind::TypedBuffer; Type *i32Ty = Builder.getInt32Ty(); Type *i64Ty = Builder.getInt64Ty(); Type *doubleTy = Builder.getDoubleTy(); Type *EltTy = Ty->getScalarType(); if (EltTy->isIntegerTy(1)) { // Since we're going to memory, convert bools to their memory // representation. EltTy = i32Ty; if (Ty->isVectorTy()) Ty = VectorType::get(EltTy, Ty->getVectorNumElements()); else Ty = EltTy; val = Builder.CreateZExt(val, Ty); } // If RawBuffer store of 64-bit value, don't set alignment to 8, // since buffer alignment isn't known to be anything over 4. 
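// e.g. a double stored to a ByteAddressBuffer is emitted with alignment 4
// rather than its 8-byte alloc size, since raw buffer offsets only come with
// a 4-byte alignment guarantee.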
unsigned alignValue = OP->GetAllocSizeForType(EltTy); if (RK == HLResource::Kind::RawBuffer && alignValue > 4) alignValue = 4; Constant *Alignment = OP->GetI32Const(alignValue); bool is64 = EltTy == i64Ty || EltTy == doubleTy; if (is64 && isTyped) { EltTy = i32Ty; } Function *F = OP->GetOpFunc(opcode, EltTy); llvm::Constant *opArg = OP->GetU32Const((unsigned)opcode); llvm::Value *undefI = llvm::UndefValue::get(llvm::Type::getInt32Ty(Ty->getContext())); llvm::Value *undefVal = llvm::UndefValue::get(Ty->getScalarType()); SmallVector<Value *, 13> storeArgs; storeArgs.emplace_back(opArg); // opcode storeArgs.emplace_back(handle); // resource handle unsigned offset0Idx = 0; if (RK == DxilResource::Kind::RawBuffer || RK == DxilResource::Kind::TypedBuffer) { // Offset 0 if (offset->getType()->isVectorTy()) { Value *scalarOffset = Builder.CreateExtractElement(offset, (uint64_t)0); storeArgs.emplace_back(scalarOffset); // offset } else { storeArgs.emplace_back(offset); // offset } // Store offset0 for later use offset0Idx = storeArgs.size() - 1; // Offset 1 storeArgs.emplace_back(undefI); } else { // texture store unsigned coordSize = DxilResource::GetNumCoords(RK); // Set x first. if (offset->getType()->isVectorTy()) storeArgs.emplace_back(Builder.CreateExtractElement(offset, (uint64_t)0)); else storeArgs.emplace_back(offset); // Store offset0 for later use offset0Idx = storeArgs.size() - 1; for (unsigned i = 1; i < 3; i++) { if (i < coordSize) storeArgs.emplace_back(Builder.CreateExtractElement(offset, i)); else storeArgs.emplace_back(undefI); } // TODO: support mip for texture ST } constexpr unsigned MaxStoreElemCount = 4; const unsigned CompCount = Ty->isVectorTy() ? Ty->getVectorNumElements() : 1; const unsigned StoreInstCount = (CompCount / MaxStoreElemCount) + (CompCount % MaxStoreElemCount != 0); SmallVector<decltype(storeArgs), 4> storeArgsList; // Max number of element to store should be 16 (for a 4x4 matrix) DXASSERT_NOMSG(StoreInstCount >= 1 && StoreInstCount <= 4); // If number of elements to store exceeds the maximum number of elements // that can be stored in a single store call, make sure to generate enough // store calls to store all elements for (unsigned j = 0; j < StoreInstCount; j++) { decltype(storeArgs) newStoreArgs; for (Value *storeArg : storeArgs) newStoreArgs.emplace_back(storeArg); storeArgsList.emplace_back(newStoreArgs); } for (unsigned j = 0; j < storeArgsList.size(); j++) { // For second and subsequent store calls, increment the offset0 (i.e. store // index) if (j > 0) { // Greater than four-components store is not allowed for // TypedBuffer and Textures. So greater than four elements // scenario should only get hit here for RawBuffer. 
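// e.g. a 16-float store: CompCount = 16, StoreInstCount = 4, and call j
// covers components [4j, 4j+4) at byte offset offset0 + 16 * j.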
DXASSERT_NOMSG(RK == DxilResource::Kind::RawBuffer); unsigned EltSize = OP->GetAllocSizeForType(EltTy); unsigned newOffset = EltSize * MaxStoreElemCount * j; Value *newOffsetVal = ConstantInt::get(Builder.getInt32Ty(), newOffset); newOffsetVal = Builder.CreateAdd(storeArgsList[0][offset0Idx], newOffsetVal); storeArgsList[j][offset0Idx] = newOffsetVal; } // values uint8_t mask = 0; if (Ty->isVectorTy()) { unsigned vecSize = std::min((j + 1) * MaxStoreElemCount, Ty->getVectorNumElements()) - (j * MaxStoreElemCount); Value *emptyVal = undefVal; if (isTyped) { mask = DXIL::kCompMask_All; emptyVal = Builder.CreateExtractElement(val, (uint64_t)0); } for (unsigned i = 0; i < MaxStoreElemCount; i++) { if (i < vecSize) { storeArgsList[j].emplace_back( Builder.CreateExtractElement(val, (j * MaxStoreElemCount) + i)); mask |= (1 << i); } else { storeArgsList[j].emplace_back(emptyVal); } } } else { if (isTyped) { mask = DXIL::kCompMask_All; storeArgsList[j].emplace_back(val); storeArgsList[j].emplace_back(val); storeArgsList[j].emplace_back(val); storeArgsList[j].emplace_back(val); } else { storeArgsList[j].emplace_back(val); storeArgsList[j].emplace_back(undefVal); storeArgsList[j].emplace_back(undefVal); storeArgsList[j].emplace_back(undefVal); mask = DXIL::kCompMask_X; } } if (is64 && isTyped) { unsigned size = 1; if (Ty->isVectorTy()) { size = std::min((j + 1) * MaxStoreElemCount, Ty->getVectorNumElements()) - (j * MaxStoreElemCount); } DXASSERT(size <= 2, "raw/typed buffer only allow 4 dwords"); unsigned val0OpIdx = opcode == DXIL::OpCode::TextureStore || opcode == DXIL::OpCode::TextureStoreSample ? DXIL::OperandIndex::kTextureStoreVal0OpIdx : DXIL::OperandIndex::kBufferStoreVal0OpIdx; Value *V0 = storeArgsList[j][val0OpIdx]; Value *V1 = storeArgsList[j][val0OpIdx + 1]; Value *vals32[4]; EltTy = Ty->getScalarType(); Split64bitValForStore(EltTy, {V0, V1}, size, vals32, OP, Builder); // Fill the uninit vals. if (size == 1) { vals32[2] = vals32[0]; vals32[3] = vals32[1]; } // Change valOp to 32 version. for (unsigned i = 0; i < 4; i++) { storeArgsList[j][val0OpIdx + i] = vals32[i]; } // change mask for double if (opcode == DXIL::OpCode::RawBufferStore) { mask = size == 1 ? DXIL::kCompMask_X | DXIL::kCompMask_Y : DXIL::kCompMask_All; } } storeArgsList[j].emplace_back(OP->GetU8Const(mask)); // mask if (opcode == DXIL::OpCode::RawBufferStore) storeArgsList[j].emplace_back(Alignment); // alignment only for raw buffer else if (opcode == DXIL::OpCode::TextureStoreSample) { storeArgsList[j].emplace_back( sampIdx ? sampIdx : Builder.getInt32(0)); // sample idx only for MS textures } Builder.CreateCall(F, storeArgsList[j]); } } Value *TranslateResourceStore(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); DXIL::ResourceKind RK = pObjHelper->GetRK(handle); Value *val = CI->getArgOperand(HLOperandIndex::kStoreValOpIdx); Value *offset = CI->getArgOperand(HLOperandIndex::kStoreOffsetOpIdx); TranslateStore(RK, handle, val, offset, Builder, hlslOP); return nullptr; } } // namespace // Atomic intrinsics. namespace { // Atomic intrinsics. 
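// Two HLSL forms funnel through here: the global IOP form, e.g.
// InterlockedAdd(rwbuf[i], v), and the MOP method form, e.g.
// buf.InterlockedAdd(byteOffset, v) on RWByteAddressBuffer. AtomicHelper
// normalizes their differing operand layouts before DXIL emission.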
struct AtomicHelper { AtomicHelper(CallInst *CI, OP::OpCode op, Value *h, Type *opType = nullptr); AtomicHelper(CallInst *CI, OP::OpCode op, Value *h, Value *bufIdx, Value *baseOffset, Type *opType = nullptr); OP::OpCode opcode; Value *handle; Value *addr; Value *offset; // Offset for structured buffer. Value *value; Value *originalValue; Value *compareValue; Type *operationType; }; // For MOP version of Interlocked*. AtomicHelper::AtomicHelper(CallInst *CI, OP::OpCode op, Value *h, Type *opType) : opcode(op), handle(h), offset(nullptr), originalValue(nullptr), operationType(opType) { addr = CI->getArgOperand(HLOperandIndex::kObjectInterlockedDestOpIndex); if (op == OP::OpCode::AtomicCompareExchange) { compareValue = CI->getArgOperand( HLOperandIndex::kObjectInterlockedCmpCompareValueOpIndex); value = CI->getArgOperand(HLOperandIndex::kObjectInterlockedCmpValueOpIndex); if (CI->getNumArgOperands() == (HLOperandIndex::kObjectInterlockedCmpOriginalValueOpIndex + 1)) originalValue = CI->getArgOperand( HLOperandIndex::kObjectInterlockedCmpOriginalValueOpIndex); } else { value = CI->getArgOperand(HLOperandIndex::kObjectInterlockedValueOpIndex); if (CI->getNumArgOperands() == (HLOperandIndex::kObjectInterlockedOriginalValueOpIndex + 1)) originalValue = CI->getArgOperand( HLOperandIndex::kObjectInterlockedOriginalValueOpIndex); } if (nullptr == operationType) operationType = value->getType(); } // For IOP version of Interlocked*. AtomicHelper::AtomicHelper(CallInst *CI, OP::OpCode op, Value *h, Value *bufIdx, Value *baseOffset, Type *opType) : opcode(op), handle(h), addr(bufIdx), offset(baseOffset), originalValue(nullptr), operationType(opType) { if (op == OP::OpCode::AtomicCompareExchange) { compareValue = CI->getArgOperand(HLOperandIndex::kInterlockedCmpCompareValueOpIndex); value = CI->getArgOperand(HLOperandIndex::kInterlockedCmpValueOpIndex); if (CI->getNumArgOperands() == (HLOperandIndex::kInterlockedCmpOriginalValueOpIndex + 1)) originalValue = CI->getArgOperand( HLOperandIndex::kInterlockedCmpOriginalValueOpIndex); } else { value = CI->getArgOperand(HLOperandIndex::kInterlockedValueOpIndex); if (CI->getNumArgOperands() == (HLOperandIndex::kInterlockedOriginalValueOpIndex + 1)) originalValue = CI->getArgOperand(HLOperandIndex::kInterlockedOriginalValueOpIndex); } if (nullptr == operationType) operationType = value->getType(); } void TranslateAtomicBinaryOperation(AtomicHelper &helper, DXIL::AtomicBinOpCode atomicOp, IRBuilder<> &Builder, hlsl::OP *hlslOP) { Value *handle = helper.handle; Value *addr = helper.addr; Value *val = helper.value; Type *Ty = helper.operationType; Type *valTy = val->getType(); Value *undefI = UndefValue::get(Type::getInt32Ty(Ty->getContext())); Function *dxilAtomic = hlslOP->GetOpFunc(helper.opcode, Ty->getScalarType()); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(helper.opcode)); Value *atomicOpArg = hlslOP->GetU32Const(static_cast<unsigned>(atomicOp)); if (Ty != valTy) val = Builder.CreateBitCast(val, Ty); Value *args[] = {opArg, handle, atomicOpArg, undefI, undefI, undefI, // coordinates val}; // Setup coordinates.
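// dx.op.atomicBinOp reserves three i32 coordinate slots; whichever ones the
// resource dimension does not use are left undef.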
if (addr->getType()->isVectorTy()) { unsigned vectorNumElements = addr->getType()->getVectorNumElements(); DXASSERT(vectorNumElements <= 3, "up to 3 elements for atomic binary op"); assert(vectorNumElements <= 3); for (unsigned i = 0; i < vectorNumElements; i++) { Value *Elt = Builder.CreateExtractElement(addr, i); args[DXIL::OperandIndex::kAtomicBinOpCoord0OpIdx + i] = Elt; } } else args[DXIL::OperandIndex::kAtomicBinOpCoord0OpIdx] = addr; // Set offset for structured buffer. if (helper.offset) args[DXIL::OperandIndex::kAtomicBinOpCoord1OpIdx] = helper.offset; Value *origVal = Builder.CreateCall(dxilAtomic, args, hlslOP->GetAtomicOpName(atomicOp)); if (helper.originalValue) { if (Ty != valTy) origVal = Builder.CreateBitCast(origVal, valTy); Builder.CreateStore(origVal, helper.originalValue); } } Value *TranslateMopAtomicBinaryOperation( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); switch (IOP) { case IntrinsicOp::MOP_InterlockedAdd: case IntrinsicOp::MOP_InterlockedAdd64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Add, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedAnd: case IntrinsicOp::MOP_InterlockedAnd64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::And, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedExchange: case IntrinsicOp::MOP_InterlockedExchange64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Exchange, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedExchangeFloat: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle, Type::getInt32Ty(CI->getContext())); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Exchange, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedMax: case IntrinsicOp::MOP_InterlockedMax64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::IMax, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedMin: case IntrinsicOp::MOP_InterlockedMin64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::IMin, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedUMax: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::UMax, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedUMin: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::UMin, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedOr: case IntrinsicOp::MOP_InterlockedOr64: { AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Or, Builder, hlslOP); } break; case IntrinsicOp::MOP_InterlockedXor: case IntrinsicOp::MOP_InterlockedXor64: default: { DXASSERT(IOP == IntrinsicOp::MOP_InterlockedXor || IOP == IntrinsicOp::MOP_InterlockedXor64, "invalid MOP atomic intrinsic"); AtomicHelper helper(CI, DXIL::OpCode::AtomicBinOp, handle); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Xor, Builder, hlslOP); } 
break; } return nullptr; } void TranslateAtomicCmpXChg(AtomicHelper &helper, IRBuilder<> &Builder, hlsl::OP *hlslOP) { Value *handle = helper.handle; Value *addr = helper.addr; Value *val = helper.value; Value *cmpVal = helper.compareValue; Type *Ty = helper.operationType; Type *valTy = val->getType(); Value *undefI = UndefValue::get(Type::getInt32Ty(Ty->getContext())); Function *dxilAtomic = hlslOP->GetOpFunc(helper.opcode, Ty->getScalarType()); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(helper.opcode)); if (Ty != valTy) { val = Builder.CreateBitCast(val, Ty); if (cmpVal) cmpVal = Builder.CreateBitCast(cmpVal, Ty); } Value *args[] = {opArg, handle, undefI, undefI, undefI, // coordinates cmpVal, val}; // Setup coordinates. if (addr->getType()->isVectorTy()) { unsigned vectorNumElements = addr->getType()->getVectorNumElements(); DXASSERT(vectorNumElements <= 3, "up to 3 elements in atomic op"); assert(vectorNumElements <= 3); for (unsigned i = 0; i < vectorNumElements; i++) { Value *Elt = Builder.CreateExtractElement(addr, i); args[DXIL::OperandIndex::kAtomicCmpExchangeCoord0OpIdx + i] = Elt; } } else args[DXIL::OperandIndex::kAtomicCmpExchangeCoord0OpIdx] = addr; // Set offset for structured buffer. if (helper.offset) args[DXIL::OperandIndex::kAtomicCmpExchangeCoord1OpIdx] = helper.offset; Value *origVal = Builder.CreateCall(dxilAtomic, args); if (helper.originalValue) { if (Ty != valTy) origVal = Builder.CreateBitCast(origVal, valTy); Builder.CreateStore(origVal, helper.originalValue); } } Value *TranslateMopAtomicCmpXChg(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); Type *opType = nullptr; if (IOP == IntrinsicOp::MOP_InterlockedCompareStoreFloatBitwise || IOP == IntrinsicOp::MOP_InterlockedCompareExchangeFloatBitwise) opType = Type::getInt32Ty(CI->getContext()); AtomicHelper atomicHelper(CI, OP::OpCode::AtomicCompareExchange, handle, opType); TranslateAtomicCmpXChg(atomicHelper, Builder, hlslOP); return nullptr; } void TranslateSharedMemOrNodeAtomicBinOp(CallInst *CI, IntrinsicOp IOP, Value *addr) { AtomicRMWInst::BinOp Op; IRBuilder<> Builder(CI); Value *val = CI->getArgOperand(HLOperandIndex::kInterlockedValueOpIndex); PointerType *ptrType = dyn_cast<PointerType>( CI->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex)->getType()); bool needCast = ptrType && ptrType->getElementType()->isFloatTy(); switch (IOP) { case IntrinsicOp::IOP_InterlockedAdd: Op = AtomicRMWInst::BinOp::Add; break; case IntrinsicOp::IOP_InterlockedAnd: Op = AtomicRMWInst::BinOp::And; break; case IntrinsicOp::IOP_InterlockedExchange: if (needCast) { val = Builder.CreateBitCast(val, Type::getInt32Ty(CI->getContext())); addr = Builder.CreateBitCast( addr, Type::getInt32PtrTy(CI->getContext(), addr->getType()->getPointerAddressSpace())); } Op = AtomicRMWInst::BinOp::Xchg; break; case IntrinsicOp::IOP_InterlockedMax: Op = AtomicRMWInst::BinOp::Max; break; case IntrinsicOp::IOP_InterlockedUMax: Op = AtomicRMWInst::BinOp::UMax; break; case IntrinsicOp::IOP_InterlockedMin: Op = AtomicRMWInst::BinOp::Min; break; case IntrinsicOp::IOP_InterlockedUMin: Op = AtomicRMWInst::BinOp::UMin; break; case IntrinsicOp::IOP_InterlockedOr: Op = AtomicRMWInst::BinOp::Or; break; case IntrinsicOp::IOP_InterlockedXor: default: DXASSERT(IOP == IntrinsicOp::IOP_InterlockedXor, "Invalid Intrinsic"); Op = 
AtomicRMWInst::BinOp::Xor; break; } Value *Result = Builder.CreateAtomicRMW( Op, addr, val, AtomicOrdering::SequentiallyConsistent); if (CI->getNumArgOperands() > HLOperandIndex::kInterlockedOriginalValueOpIndex) { if (needCast) Result = Builder.CreateBitCast(Result, Type::getFloatTy(CI->getContext())); Builder.CreateStore( Result, CI->getArgOperand(HLOperandIndex::kInterlockedOriginalValueOpIndex)); } } static Value *SkipAddrSpaceCast(Value *Ptr) { if (AddrSpaceCastInst *CastInst = dyn_cast<AddrSpaceCastInst>(Ptr)) return CastInst->getOperand(0); else if (ConstantExpr *ConstExpr = dyn_cast<ConstantExpr>(Ptr)) { if (ConstExpr->getOpcode() == Instruction::AddrSpaceCast) { return ConstExpr->getOperand(0); } } return Ptr; } Value * TranslateNodeIncrementOutputCount(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool isPerThread, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *count = CI->getArgOperand(HLOperandIndex::kIncrementOutputCountCountIdx); Function *dxilFunc = OP->GetOpFunc(op, CI->getType()); Value *opArg = OP->GetU32Const((unsigned)op); Value *perThread = OP->GetI1Const(isPerThread); Value *args[] = {opArg, handle, count, perThread}; IRBuilder<> Builder(CI); Builder.CreateCall(dxilFunc, args); return nullptr; } /* HLSL: void EmptyNodeOutput::GroupIncrementOutputCount(uint count) DXIL: void @dx.op.groupIncrementOutputCount(i32 %Opcode, %dx.types.NodeHandle %NodeOutput, i32 count) */ Value *TranslateNodeGroupIncrementOutputCount( CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { return TranslateNodeIncrementOutputCount(CI, IOP, op, helper, pObjHelper, /*isPerThread*/ false, Translated); } /* HLSL: void EmptyNodeOutput::ThreadIncrementOutputCount(uint count) DXIL: void @dx.op.threadIncrementOutputCount(i32 %Opcode, %dx.types.NodeHandle %NodeOutput, i32 count) */ Value *TranslateNodeThreadIncrementOutputCount( CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { return TranslateNodeIncrementOutputCount(CI, IOP, op, helper, pObjHelper, /*isPerThread*/ true, Translated); } // For known non-groupshared, verify that the destination param is valid void ValidateAtomicDestination(CallInst *CI, HLObjectOperationLowerHelper *pObjHelper) { Value *dest = CI->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex); // If we encounter a gep, we may provide a more specific error message bool hasGep = isa<GetElementPtrInst>(dest); // Confirm that dest is a properly-used UAV // Drill through subscripts and geps, anything else indicates a misuse while (true) { if (GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(dest)) { dest = gep->getPointerOperand(); continue; } if (CallInst *handle = dyn_cast<CallInst>(dest)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(handle->getCalledFunction()); if (group != HLOpcodeGroup::HLSubscript) break; dest = handle->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx); continue; } break; } if (pObjHelper->GetRC(dest) == DXIL::ResourceClass::UAV) { DXIL::ResourceKind RK = pObjHelper->GetRK(dest); if (DXIL::IsStructuredBuffer(RK)) return; // no errors if (DXIL::IsTyped(RK)) { if (hasGep) dxilutil::EmitErrorOnInstruction( CI, "Typed resources used in atomic operations must have a scalar " "element type."); return; // error emitted or else no 
errors } } dxilutil::EmitErrorOnInstruction( CI, "Atomic operation targets must be groupshared, Node Record or UAV."); } Value *TranslateIopAtomicBinaryOperation( CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *addr = CI->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex); addr = SkipAddrSpaceCast(addr); unsigned addressSpace = addr->getType()->getPointerAddressSpace(); if (addressSpace == DXIL::kTGSMAddrSpace || addressSpace == DXIL::kNodeRecordAddrSpace) TranslateSharedMemOrNodeAtomicBinOp(CI, IOP, addr); else { // If not groupshared or node record, we either have an error case or will // translate the atomic op in the process of translating users of the // subscript operator Mark not translated and validate dest param Translated = false; ValidateAtomicDestination(CI, pObjHelper); } return nullptr; } void TranslateSharedMemOrNodeAtomicCmpXChg(CallInst *CI, Value *addr) { Value *val = CI->getArgOperand(HLOperandIndex::kInterlockedCmpValueOpIndex); Value *cmpVal = CI->getArgOperand(HLOperandIndex::kInterlockedCmpCompareValueOpIndex); IRBuilder<> Builder(CI); PointerType *ptrType = dyn_cast<PointerType>( CI->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex)->getType()); bool needCast = false; if (ptrType && ptrType->getElementType()->isFloatTy()) { needCast = true; val = Builder.CreateBitCast(val, Type::getInt32Ty(CI->getContext())); cmpVal = Builder.CreateBitCast(cmpVal, Type::getInt32Ty(CI->getContext())); unsigned addrSpace = cast<PointerType>(addr->getType())->getAddressSpace(); addr = Builder.CreateBitCast( addr, Type::getInt32PtrTy(CI->getContext(), addrSpace)); } Value *Result = Builder.CreateAtomicCmpXchg( addr, cmpVal, val, AtomicOrdering::SequentiallyConsistent, AtomicOrdering::SequentiallyConsistent); if (CI->getNumArgOperands() > HLOperandIndex::kInterlockedCmpOriginalValueOpIndex) { Value *originVal = Builder.CreateExtractValue(Result, 0); if (needCast) originVal = Builder.CreateBitCast(originVal, Type::getFloatTy(CI->getContext())); Builder.CreateStore( originVal, CI->getArgOperand(HLOperandIndex::kInterlockedCmpOriginalValueOpIndex)); } } Value *TranslateIopAtomicCmpXChg(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *addr = CI->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex); addr = SkipAddrSpaceCast(addr); unsigned addressSpace = addr->getType()->getPointerAddressSpace(); if (addressSpace == DXIL::kTGSMAddrSpace || addressSpace == DXIL::kNodeRecordAddrSpace) TranslateSharedMemOrNodeAtomicCmpXChg(CI, addr); else { // If not groupshared, we either have an error case or will translate // the atomic op in the process of translating users of the subscript // operator Mark not translated and validate dest param Translated = false; ValidateAtomicDestination(CI, pObjHelper); } return nullptr; } } // namespace // Process Tess Factor. namespace { // Clamp to [0.0f..1.0f], NaN->0.0f. 
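// DXIL FMax/FMin return the non-NaN operand when exactly one input is NaN,
// so max(x, 0) followed by min(x, 1) clamps to [0, 1] and maps NaN to 0 in
// a single pass.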
Value *CleanupTessFactorScale(Value *input, hlsl::OP *hlslOP, IRBuilder<> &Builder) { float fMin = 0; float fMax = 1; Type *f32Ty = input->getType()->getScalarType(); Value *minFactor = ConstantFP::get(f32Ty, fMin); Value *maxFactor = ConstantFP::get(f32Ty, fMax); Type *Ty = input->getType(); if (Ty->isVectorTy()) minFactor = SplatToVector(minFactor, input->getType(), Builder); Value *temp = TrivialDxilBinaryOperation(DXIL::OpCode::FMax, input, minFactor, hlslOP, Builder); if (Ty->isVectorTy()) maxFactor = SplatToVector(maxFactor, input->getType(), Builder); return TrivialDxilBinaryOperation(DXIL::OpCode::FMin, temp, maxFactor, hlslOP, Builder); } // Clamp to [1.0f..Inf], NaN->1.0f. Value *CleanupTessFactor(Value *input, hlsl::OP *hlslOP, IRBuilder<> &Builder) { float fMin = 1.0; Type *f32Ty = input->getType()->getScalarType(); Value *minFactor = ConstantFP::get(f32Ty, fMin); minFactor = SplatToVector(minFactor, input->getType(), Builder); return TrivialDxilBinaryOperation(DXIL::OpCode::FMax, input, minFactor, hlslOP, Builder); } // Do partitioning-specific clamping. Value *ClampTessFactor(Value *input, DXIL::TessellatorPartitioning partitionMode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { const unsigned kTESSELLATOR_MAX_EVEN_TESSELLATION_FACTOR = 64; const unsigned kTESSELLATOR_MAX_ODD_TESSELLATION_FACTOR = 63; const unsigned kTESSELLATOR_MIN_EVEN_TESSELLATION_FACTOR = 2; const unsigned kTESSELLATOR_MIN_ODD_TESSELLATION_FACTOR = 1; const unsigned kTESSELLATOR_MAX_TESSELLATION_FACTOR = 64; float fMin; float fMax; switch (partitionMode) { case DXIL::TessellatorPartitioning::Integer: fMin = kTESSELLATOR_MIN_ODD_TESSELLATION_FACTOR; fMax = kTESSELLATOR_MAX_TESSELLATION_FACTOR; break; case DXIL::TessellatorPartitioning::Pow2: fMin = kTESSELLATOR_MIN_ODD_TESSELLATION_FACTOR; fMax = kTESSELLATOR_MAX_EVEN_TESSELLATION_FACTOR; break; case DXIL::TessellatorPartitioning::FractionalOdd: fMin = kTESSELLATOR_MIN_ODD_TESSELLATION_FACTOR; fMax = kTESSELLATOR_MAX_ODD_TESSELLATION_FACTOR; break; case DXIL::TessellatorPartitioning::FractionalEven: default: DXASSERT(partitionMode == DXIL::TessellatorPartitioning::FractionalEven, "invalid partition mode"); fMin = kTESSELLATOR_MIN_EVEN_TESSELLATION_FACTOR; fMax = kTESSELLATOR_MAX_EVEN_TESSELLATION_FACTOR; break; } Type *f32Ty = input->getType()->getScalarType(); Value *minFactor = ConstantFP::get(f32Ty, fMin); Value *maxFactor = ConstantFP::get(f32Ty, fMax); Type *Ty = input->getType(); if (Ty->isVectorTy()) minFactor = SplatToVector(minFactor, input->getType(), Builder); Value *temp = TrivialDxilBinaryOperation(DXIL::OpCode::FMax, input, minFactor, hlslOP, Builder); if (Ty->isVectorTy()) maxFactor = SplatToVector(maxFactor, input->getType(), Builder); return TrivialDxilBinaryOperation(DXIL::OpCode::FMin, temp, maxFactor, hlslOP, Builder); } // round up for integer/pow2 partitioning // note that this code assumes the inputs should be in the range [1, inf), // which should be enforced by the clamp above. Value *RoundUpTessFactor(Value *input, DXIL::TessellatorPartitioning partitionMode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { switch (partitionMode) { case DXIL::TessellatorPartitioning::Integer: return TrivialDxilUnaryOperation(DXIL::OpCode::Round_pi, input, hlslOP, Builder); case DXIL::TessellatorPartitioning::Pow2: { const unsigned kExponentMask = 0x7f800000; const unsigned kExponentLSB = 0x00800000; const unsigned kMantissaMask = 0x007fffff; Type *Ty = input->getType(); // (val = (asuint(val) & mantissamask) ? 
// (asuint(val) & exponentmask) + exponentbump : // asuint(val) & exponentmask; Type *uintTy = Type::getInt32Ty(Ty->getContext()); if (Ty->isVectorTy()) uintTy = VectorType::get(uintTy, Ty->getVectorNumElements()); Value *uintVal = Builder.CreateCast(Instruction::CastOps::FPToUI, input, uintTy); Value *mantMask = ConstantInt::get(uintTy->getScalarType(), kMantissaMask); mantMask = SplatToVector(mantMask, uintTy, Builder); Value *manVal = Builder.CreateAnd(uintVal, mantMask); Value *expMask = ConstantInt::get(uintTy->getScalarType(), kExponentMask); expMask = SplatToVector(expMask, uintTy, Builder); Value *expVal = Builder.CreateAnd(uintVal, expMask); Value *expLSB = ConstantInt::get(uintTy->getScalarType(), kExponentLSB); expLSB = SplatToVector(expLSB, uintTy, Builder); Value *newExpVal = Builder.CreateAdd(expVal, expLSB); Value *manValNotZero = Builder.CreateICmpEQ(manVal, ConstantAggregateZero::get(uintTy)); Value *factors = Builder.CreateSelect(manValNotZero, newExpVal, expVal); return Builder.CreateUIToFP(factors, Ty); } break; case DXIL::TessellatorPartitioning::FractionalEven: case DXIL::TessellatorPartitioning::FractionalOdd: return input; default: DXASSERT(0, "invalid partition mode"); return nullptr; } } Value *TranslateProcessIsolineTessFactors( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; // Get partition mode DXASSERT_NOMSG(helper.functionProps); DXASSERT(helper.functionProps->shaderKind == ShaderModel::Kind::Hull, "must be hull shader"); DXIL::TessellatorPartitioning partition = helper.functionProps->ShaderProps.HS.partition; IRBuilder<> Builder(CI); Value *rawDetailFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRawDetailFactor); rawDetailFactor = Builder.CreateExtractElement(rawDetailFactor, (uint64_t)0); Value *rawDensityFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRawDensityFactor); rawDensityFactor = Builder.CreateExtractElement(rawDensityFactor, (uint64_t)0); Value *init = UndefValue::get(VectorType::get(helper.f32Ty, 2)); init = Builder.CreateInsertElement(init, rawDetailFactor, (uint64_t)0); init = Builder.CreateInsertElement(init, rawDetailFactor, (uint64_t)1); Value *clamped = ClampTessFactor(init, partition, hlslOP, Builder); Value *rounded = RoundUpTessFactor(clamped, partition, hlslOP, Builder); Value *roundedDetailFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRoundedDetailFactor); Value *temp = UndefValue::get(VectorType::get(helper.f32Ty, 1)); Value *roundedX = Builder.CreateExtractElement(rounded, (uint64_t)0); temp = Builder.CreateInsertElement(temp, roundedX, (uint64_t)0); Builder.CreateStore(temp, roundedDetailFactor); Value *roundedDensityFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRoundedDensityFactor); Value *roundedY = Builder.CreateExtractElement(rounded, 1); temp = Builder.CreateInsertElement(temp, roundedY, (uint64_t)0); Builder.CreateStore(temp, roundedDensityFactor); return nullptr; } // 3 inputs, 1 result Value *ApplyTriTessFactorOp(Value *input, DXIL::OpCode opcode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Value *input0 = Builder.CreateExtractElement(input, (uint64_t)0); Value *input1 = Builder.CreateExtractElement(input, 1); Value *input2 = Builder.CreateExtractElement(input, 2); if (opcode == DXIL::OpCode::FMax || opcode == DXIL::OpCode::FMin) { Value *temp = TrivialDxilBinaryOperation(opcode, input0, input1, hlslOP, Builder); Value *combined = 
TrivialDxilBinaryOperation(opcode, temp, input2, hlslOP, Builder); return combined; } else { // Avg. Value *temp = Builder.CreateFAdd(input0, input1); Value *combined = Builder.CreateFAdd(temp, input2); Value *rcp = ConstantFP::get(input0->getType(), 1.0 / 3.0); combined = Builder.CreateFMul(combined, rcp); return combined; } } // 4 inputs, 1 result Value *ApplyQuadTessFactorOp(Value *input, DXIL::OpCode opcode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Value *input0 = Builder.CreateExtractElement(input, (uint64_t)0); Value *input1 = Builder.CreateExtractElement(input, 1); Value *input2 = Builder.CreateExtractElement(input, 2); Value *input3 = Builder.CreateExtractElement(input, 3); if (opcode == DXIL::OpCode::FMax || opcode == DXIL::OpCode::FMin) { Value *temp0 = TrivialDxilBinaryOperation(opcode, input0, input1, hlslOP, Builder); Value *temp1 = TrivialDxilBinaryOperation(opcode, input2, input3, hlslOP, Builder); Value *combined = TrivialDxilBinaryOperation(opcode, temp0, temp1, hlslOP, Builder); return combined; } else { // Avg. Value *temp0 = Builder.CreateFAdd(input0, input1); Value *temp1 = Builder.CreateFAdd(input2, input3); Value *combined = Builder.CreateFAdd(temp0, temp1); Value *rcp = ConstantFP::get(input0->getType(), 0.25); combined = Builder.CreateFMul(combined, rcp); return combined; } } // 4 inputs, 2 result Value *Apply2DQuadTessFactorOp(Value *input, DXIL::OpCode opcode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Value *input0 = Builder.CreateExtractElement(input, (uint64_t)0); Value *input1 = Builder.CreateExtractElement(input, 1); Value *input2 = Builder.CreateExtractElement(input, 2); Value *input3 = Builder.CreateExtractElement(input, 3); if (opcode == DXIL::OpCode::FMax || opcode == DXIL::OpCode::FMin) { Value *temp0 = TrivialDxilBinaryOperation(opcode, input0, input1, hlslOP, Builder); Value *temp1 = TrivialDxilBinaryOperation(opcode, input2, input3, hlslOP, Builder); Value *combined = UndefValue::get(VectorType::get(input0->getType(), 2)); combined = Builder.CreateInsertElement(combined, temp0, (uint64_t)0); combined = Builder.CreateInsertElement(combined, temp1, 1); return combined; } else { // Avg. Value *temp0 = Builder.CreateFAdd(input0, input1); Value *temp1 = Builder.CreateFAdd(input2, input3); Value *combined = UndefValue::get(VectorType::get(input0->getType(), 2)); combined = Builder.CreateInsertElement(combined, temp0, (uint64_t)0); combined = Builder.CreateInsertElement(combined, temp1, 1); Constant *rcp = ConstantFP::get(input0->getType(), 0.5); rcp = ConstantVector::getSplat(2, rcp); combined = Builder.CreateFMul(combined, rcp); return combined; } } Value *ResolveSmallValue(Value **pClampedResult, Value *rounded, Value *averageUnscaled, float cutoffVal, DXIL::TessellatorPartitioning partitionMode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Value *clampedResult = *pClampedResult; Value *clampedVal = clampedResult; Value *roundedVal = rounded; // Do partitioning-specific clamping. Value *clampedAvg = ClampTessFactor(averageUnscaled, partitionMode, hlslOP, Builder); Constant *cutoffVals = ConstantFP::get(Type::getFloatTy(rounded->getContext()), cutoffVal); if (clampedAvg->getType()->isVectorTy()) cutoffVals = ConstantVector::getSplat( clampedAvg->getType()->getVectorNumElements(), cutoffVals); // Limit the value. clampedAvg = TrivialDxilBinaryOperation(DXIL::OpCode::FMin, clampedAvg, cutoffVals, hlslOP, Builder); // Round up for integer/pow2 partitioning. 
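// Both select operands below are already partition-rounded: roundedAvg here
// and roundedVal by the caller, so whichever side wins stays a legal factor.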
Value *roundedAvg = RoundUpTessFactor(clampedAvg, partitionMode, hlslOP, Builder); if (rounded->getType() != cutoffVals->getType()) cutoffVals = ConstantVector::getSplat( rounded->getType()->getVectorNumElements(), cutoffVals); // If the scaled value is less than three, then take the unscaled average. Value *lt = Builder.CreateFCmpOLT(rounded, cutoffVals); if (clampedAvg->getType() != clampedVal->getType()) clampedAvg = SplatToVector(clampedAvg, clampedVal->getType(), Builder); *pClampedResult = Builder.CreateSelect(lt, clampedAvg, clampedVal); if (roundedAvg->getType() != roundedVal->getType()) roundedAvg = SplatToVector(roundedAvg, roundedVal->getType(), Builder); Value *result = Builder.CreateSelect(lt, roundedAvg, roundedVal); return result; } void ResolveQuadAxes(Value **pFinalResult, Value **pClampedResult, float cutoffVal, DXIL::TessellatorPartitioning partitionMode, hlsl::OP *hlslOP, IRBuilder<> &Builder) { Value *finalResult = *pFinalResult; Value *clampedResult = *pClampedResult; Value *clampR = clampedResult; Value *finalR = finalResult; Type *f32Ty = Type::getFloatTy(finalR->getContext()); Constant *cutoffVals = ConstantFP::get(f32Ty, cutoffVal); Value *minValsX = cutoffVals; Value *minValsY = RoundUpTessFactor(cutoffVals, partitionMode, hlslOP, Builder); Value *clampRX = Builder.CreateExtractElement(clampR, (uint64_t)0); Value *clampRY = Builder.CreateExtractElement(clampR, 1); Value *maxValsX = TrivialDxilBinaryOperation(DXIL::OpCode::FMax, clampRX, clampRY, hlslOP, Builder); Value *finalRX = Builder.CreateExtractElement(finalR, (uint64_t)0); Value *finalRY = Builder.CreateExtractElement(finalR, 1); Value *maxValsY = TrivialDxilBinaryOperation(DXIL::OpCode::FMax, finalRX, finalRY, hlslOP, Builder); // Don't go over our threshold ("final" one is rounded). Value *optionX = TrivialDxilBinaryOperation(DXIL::OpCode::FMin, maxValsX, minValsX, hlslOP, Builder); Value *optionY = TrivialDxilBinaryOperation(DXIL::OpCode::FMin, maxValsY, minValsY, hlslOP, Builder); Value *clampL = SplatToVector(optionX, clampR->getType(), Builder); Value *finalL = SplatToVector(optionY, finalR->getType(), Builder); cutoffVals = ConstantVector::getSplat(2, cutoffVals); Value *lt = Builder.CreateFCmpOLT(clampedResult, cutoffVals); *pClampedResult = Builder.CreateSelect(lt, clampL, clampR); *pFinalResult = Builder.CreateSelect(lt, finalL, finalR); } Value *TranslateProcessTessFactors(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; // Get partition mode DXASSERT_NOMSG(helper.functionProps); DXASSERT(helper.functionProps->shaderKind == ShaderModel::Kind::Hull, "must be hull shader"); DXIL::TessellatorPartitioning partition = helper.functionProps->ShaderProps.HS.partition; IRBuilder<> Builder(CI); DXIL::OpCode tessFactorOp = DXIL::OpCode::NumOpCodes; switch (IOP) { case IntrinsicOp::IOP_Process2DQuadTessFactorsMax: case IntrinsicOp::IOP_ProcessQuadTessFactorsMax: case IntrinsicOp::IOP_ProcessTriTessFactorsMax: tessFactorOp = DXIL::OpCode::FMax; break; case IntrinsicOp::IOP_Process2DQuadTessFactorsMin: case IntrinsicOp::IOP_ProcessQuadTessFactorsMin: case IntrinsicOp::IOP_ProcessTriTessFactorsMin: tessFactorOp = DXIL::OpCode::FMin; break; default: // Default is Avg. 
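// Leaving tessFactorOp at the NumOpCodes sentinel makes the
// Apply*TessFactorOp helpers take their averaging path instead of FMax/FMin.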
break; } Value *rawEdgeFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRawEdgeFactor); Value *insideScale = CI->getArgOperand(HLOperandIndex::kProcessTessFactorInsideScale); // Clamp to [0.0f..1.0f], NaN->0.0f. Value *scales = CleanupTessFactorScale(insideScale, hlslOP, Builder); // Do partitioning-specific clamping. Value *clamped = ClampTessFactor(rawEdgeFactor, partition, hlslOP, Builder); // Round up for integer/pow2 partitioning. Value *rounded = RoundUpTessFactor(clamped, partition, hlslOP, Builder); // Store the output. Value *roundedEdgeFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRoundedEdgeFactor); Builder.CreateStore(rounded, roundedEdgeFactor); // Clamp to [1.0f..Inf], NaN->1.0f. bool isQuad = false; Value *clean = CleanupTessFactor(rawEdgeFactor, hlslOP, Builder); Value *factors = nullptr; switch (IOP) { case IntrinsicOp::IOP_Process2DQuadTessFactorsAvg: case IntrinsicOp::IOP_Process2DQuadTessFactorsMax: case IntrinsicOp::IOP_Process2DQuadTessFactorsMin: factors = Apply2DQuadTessFactorOp(clean, tessFactorOp, hlslOP, Builder); break; case IntrinsicOp::IOP_ProcessQuadTessFactorsAvg: case IntrinsicOp::IOP_ProcessQuadTessFactorsMax: case IntrinsicOp::IOP_ProcessQuadTessFactorsMin: factors = ApplyQuadTessFactorOp(clean, tessFactorOp, hlslOP, Builder); isQuad = true; break; case IntrinsicOp::IOP_ProcessTriTessFactorsAvg: case IntrinsicOp::IOP_ProcessTriTessFactorsMax: case IntrinsicOp::IOP_ProcessTriTessFactorsMin: factors = ApplyTriTessFactorOp(clean, tessFactorOp, hlslOP, Builder); break; default: DXASSERT(0, "invalid opcode for ProcessTessFactor"); break; } Value *scaledI = nullptr; if (scales->getType() == factors->getType()) scaledI = Builder.CreateFMul(factors, scales); else { Value *vecFactors = SplatToVector(factors, scales->getType(), Builder); scaledI = Builder.CreateFMul(vecFactors, scales); } // Do partitioning-specific clamping. Value *clampedI = ClampTessFactor(scaledI, partition, hlslOP, Builder); // Round up for integer/pow2 partitioning. Value *roundedI = RoundUpTessFactor(clampedI, partition, hlslOP, Builder); Value *finalI = roundedI; if (partition == DXIL::TessellatorPartitioning::FractionalOdd) { // If not max, set to AVG. 
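// Only fractional_odd takes this path: an unscaled average (or max) of the
// cleaned factors is computed so ResolveSmallValue can substitute it when
// the scaled result drops below the cutoff of 3.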
if (tessFactorOp != DXIL::OpCode::FMax) tessFactorOp = DXIL::OpCode::NumOpCodes; bool b2D = false; Value *avgFactorsI = nullptr; switch (IOP) { case IntrinsicOp::IOP_Process2DQuadTessFactorsAvg: case IntrinsicOp::IOP_Process2DQuadTessFactorsMax: case IntrinsicOp::IOP_Process2DQuadTessFactorsMin: avgFactorsI = Apply2DQuadTessFactorOp(clean, tessFactorOp, hlslOP, Builder); b2D = true; break; case IntrinsicOp::IOP_ProcessQuadTessFactorsAvg: case IntrinsicOp::IOP_ProcessQuadTessFactorsMax: case IntrinsicOp::IOP_ProcessQuadTessFactorsMin: avgFactorsI = ApplyQuadTessFactorOp(clean, tessFactorOp, hlslOP, Builder); break; case IntrinsicOp::IOP_ProcessTriTessFactorsAvg: case IntrinsicOp::IOP_ProcessTriTessFactorsMax: case IntrinsicOp::IOP_ProcessTriTessFactorsMin: avgFactorsI = ApplyTriTessFactorOp(clean, tessFactorOp, hlslOP, Builder); break; default: DXASSERT(0, "invalid opcode for ProcessTessFactor"); break; } finalI = ResolveSmallValue(/*inout*/ &clampedI, roundedI, avgFactorsI, /*cutoff*/ 3.0, partition, hlslOP, Builder); if (b2D) ResolveQuadAxes(/*inout*/ &finalI, /*inout*/ &clampedI, /*cutoff*/ 3.0, partition, hlslOP, Builder); } Value *unroundedInsideFactor = CI->getArgOperand( HLOperandIndex::kProcessTessFactorUnRoundedInsideFactor); Type *outFactorTy = unroundedInsideFactor->getType()->getPointerElementType(); if (outFactorTy != clampedI->getType()) { DXASSERT(isQuad, "quad only writes one channel of out factor"); (void)isQuad; clampedI = Builder.CreateExtractElement(clampedI, (uint64_t)0); // Splat clampedI to float2. clampedI = SplatToVector(clampedI, outFactorTy, Builder); } Builder.CreateStore(clampedI, unroundedInsideFactor); Value *roundedInsideFactor = CI->getArgOperand(HLOperandIndex::kProcessTessFactorRoundedInsideFactor); if (outFactorTy != finalI->getType()) { DXASSERT(isQuad, "quad only writes one channel of out factor"); finalI = Builder.CreateExtractElement(finalI, (uint64_t)0); // Splat finalI to float2. finalI = SplatToVector(finalI, outFactorTy, Builder); } Builder.CreateStore(finalI, roundedInsideFactor); return nullptr; } } // namespace // Ray Tracing.
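// TraceRay lowering flattens the RayDesc argument: float3 Origin, TMin,
// float3 Direction, and TMax become eight scalar operands of
// dx.op.traceRay.<payload>, with the payload pointer as the final operand.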
namespace { Value *TranslateReportIntersection(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *THit = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *HitKind = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *Attr = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Type *Ty = Attr->getType(); Function *F = hlslOP->GetOpFunc(opcode, Ty); IRBuilder<> Builder(CI); return Builder.CreateCall(F, {opArg, THit, HitKind, Attr}); } Value *TranslateCallShader(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *ShaderIndex = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *Parameter = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Type *Ty = Parameter->getType(); Function *F = hlslOP->GetOpFunc(opcode, Ty); IRBuilder<> Builder(CI); return Builder.CreateCall(F, {opArg, ShaderIndex, Parameter}); } Value *TranslateTraceRay(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *rayDesc = CI->getArgOperand(HLOperandIndex::kTraceRayRayDescOpIdx); Value *payLoad = CI->getArgOperand(HLOperandIndex::kTraceRayPayLoadOpIdx); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Value *Args[DXIL::OperandIndex::kTraceRayNumOp]; Args[0] = opArg; for (unsigned i = 1; i < HLOperandIndex::kTraceRayRayDescOpIdx; i++) { Args[i] = CI->getArgOperand(i); } IRBuilder<> Builder(CI); // struct RayDesc //{ // float3 Origin; // float TMin; // float3 Direction; // float TMax; //}; Value *zeroIdx = hlslOP->GetU32Const(0); Value *origin = Builder.CreateGEP(rayDesc, {zeroIdx, zeroIdx}); origin = Builder.CreateLoad(origin); unsigned index = DXIL::OperandIndex::kTraceRayRayDescOpIdx; Args[index++] = Builder.CreateExtractElement(origin, (uint64_t)0); Args[index++] = Builder.CreateExtractElement(origin, 1); Args[index++] = Builder.CreateExtractElement(origin, 2); Value *tmin = Builder.CreateGEP(rayDesc, {zeroIdx, hlslOP->GetU32Const(1)}); tmin = Builder.CreateLoad(tmin); Args[index++] = tmin; Value *direction = Builder.CreateGEP(rayDesc, {zeroIdx, hlslOP->GetU32Const(2)}); direction = Builder.CreateLoad(direction); Args[index++] = Builder.CreateExtractElement(direction, (uint64_t)0); Args[index++] = Builder.CreateExtractElement(direction, 1); Args[index++] = Builder.CreateExtractElement(direction, 2); Value *tmax = Builder.CreateGEP(rayDesc, {zeroIdx, hlslOP->GetU32Const(3)}); tmax = Builder.CreateLoad(tmax); Args[index++] = tmax; Args[DXIL::OperandIndex::kTraceRayPayloadOpIdx] = payLoad; Type *Ty = payLoad->getType(); Function *F = hlslOP->GetOpFunc(opcode, Ty); return Builder.CreateCall(F, Args); } // RayQuery methods Value *TranslateAllocateRayQuery(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *refArgs[] = {nullptr, CI->getOperand(1)}; return TrivialDxilOperation(opcode, refArgs, helper.voidTy, CI, hlslOP); } Value *TranslateTraceRayInline(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper 
&helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Value *Args[DXIL::OperandIndex::kTraceRayInlineNumOp]; Args[0] = opArg; for (unsigned i = 1; i < HLOperandIndex::kTraceRayInlineRayDescOpIdx; i++) { Args[i] = CI->getArgOperand(i); } IRBuilder<> Builder(CI); unsigned hlIndex = HLOperandIndex::kTraceRayInlineRayDescOpIdx; unsigned index = DXIL::OperandIndex::kTraceRayInlineRayDescOpIdx; // struct RayDesc //{ // float3 Origin; Value *origin = CI->getArgOperand(hlIndex++); Args[index++] = Builder.CreateExtractElement(origin, (uint64_t)0); Args[index++] = Builder.CreateExtractElement(origin, 1); Args[index++] = Builder.CreateExtractElement(origin, 2); // float TMin; Args[index++] = CI->getArgOperand(hlIndex++); // float3 Direction; Value *direction = CI->getArgOperand(hlIndex++); Args[index++] = Builder.CreateExtractElement(direction, (uint64_t)0); Args[index++] = Builder.CreateExtractElement(direction, 1); Args[index++] = Builder.CreateExtractElement(direction, 2); // float TMax; Args[index++] = CI->getArgOperand(hlIndex++); //}; DXASSERT_NOMSG(index == DXIL::OperandIndex::kTraceRayInlineNumOp); Function *F = hlslOP->GetOpFunc(opcode, Builder.getVoidTy()); return Builder.CreateCall(F, Args); } Value *TranslateCommitProceduralPrimitiveHit( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *THit = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *Args[] = {opArg, handle, THit}; IRBuilder<> Builder(CI); Function *F = hlslOP->GetOpFunc(opcode, Builder.getVoidTy()); return Builder.CreateCall(F, Args); } Value *TranslateGenericRayQueryMethod(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *opArg = hlslOP->GetU32Const(static_cast<unsigned>(opcode)); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); IRBuilder<> Builder(CI); Function *F = hlslOP->GetOpFunc(opcode, CI->getType()); return Builder.CreateCall(F, {opArg, handle}); } Value *TranslateRayQueryMatrix3x4Operation( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); uint32_t rVals[] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2}; Constant *rows = ConstantDataVector::get(CI->getContext(), rVals); uint8_t cVals[] = {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3}; Constant *cols = ConstantDataVector::get(CI->getContext(), cVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, handle, rows, cols}, Ty, CI, hlslOP); return retVal; } Value *TranslateRayQueryTransposedMatrix3x4Operation( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); uint32_t rVals[] = {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2}; Constant *rows = 
ConstantDataVector::get(CI->getContext(), rVals); uint8_t cVals[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}; Constant *cols = ConstantDataVector::get(CI->getContext(), cVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, handle, rows, cols}, Ty, CI, hlslOP); return retVal; } Value *TranslateRayQueryFloat2Getter(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); uint8_t elementVals[] = {0, 1}; Constant *element = ConstantDataVector::get(CI->getContext(), elementVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, handle, element}, Ty, CI, hlslOP); return retVal; } Value *TranslateRayQueryFloat3Getter(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); uint8_t elementVals[] = {0, 1, 2}; Constant *element = ConstantDataVector::get(CI->getContext(), elementVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, handle, element}, Ty, CI, hlslOP); return retVal; } Value *TranslateNoArgVectorOperation(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); uint8_t vals[] = {0, 1, 2, 3}; Constant *src = ConstantDataVector::get(CI->getContext(), vals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, src}, Ty, CI, hlslOP); return retVal; } Value *TranslateNoArgMatrix3x4Operation( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); uint32_t rVals[] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2}; Constant *rows = ConstantDataVector::get(CI->getContext(), rVals); uint8_t cVals[] = {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3}; Constant *cols = ConstantDataVector::get(CI->getContext(), cVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, rows, cols}, Ty, CI, hlslOP); return retVal; } Value *TranslateNoArgTransposedMatrix3x4Operation( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; VectorType *Ty = cast<VectorType>(CI->getType()); uint32_t rVals[] = {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2}; Constant *rows = ConstantDataVector::get(CI->getContext(), rVals); uint8_t cVals[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}; Constant *cols = ConstantDataVector::get(CI->getContext(), cVals); Value *retVal = TrivialDxilOperation(opcode, {nullptr, rows, cols}, Ty, CI, hlslOP); return retVal; } /* HLSL: void ThreadNodeOutputRecords<recordType>::OutputComplete(); void GroupNodeOutputRecords<recordType>::OutputComplete(); DXIL: void @dx.op.outputComplete(i32 %Opcode, %dx.types.NodeRecordHandle %RecordHandle) */ Value *TranslateNodeOutputComplete(CallInst *CI, IntrinsicOp IOP, OP::OpCode op, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *OP = &helper.hlslOP; Value *handle = 
CI->getArgOperand(HLOperandIndex::kHandleOpIdx); DXASSERT_NOMSG(handle->getType() == OP->GetNodeRecordHandleType()); Function *dxilFunc = OP->GetOpFunc(op, CI->getType()); Value *opArg = OP->GetU32Const((unsigned)op); IRBuilder<> Builder(CI); return Builder.CreateCall(dxilFunc, {opArg, handle}); } Value *TranslateNoArgNoReturnPreserveOutput( CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Instruction *pResult = cast<Instruction>( TrivialNoArgOperation(CI, IOP, opcode, helper, pObjHelper, Translated)); // HL intrinsic must have had a return injected just after the call. // SROA_Parameter_HLSL will copy from alloca to output just before each // return. Now move call after the copy and just before the return. if (isa<ReturnInst>(pResult->getNextNode())) return pResult; ReturnInst *RetI = cast<ReturnInst>(pResult->getParent()->getTerminator()); pResult->removeFromParent(); pResult->insertBefore(RetI); return pResult; } // Special half dot2 with accumulate to float Value *TranslateDot2Add(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); const unsigned vecSize = 2; DXASSERT(src0->getType()->isVectorTy() && vecSize == src0->getType()->getVectorNumElements() && src0->getType()->getScalarType()->isHalfTy(), "otherwise, unexpected input dimension or component type"); Value *src1 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); DXASSERT(src0->getType() == src1->getType(), "otherwise, mismatched argument types"); Value *accArg = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); Type *accTy = accArg->getType(); DXASSERT(!accTy->isVectorTy() && accTy->isFloatTy(), "otherwise, unexpected accumulator type"); IRBuilder<> Builder(CI); Function *dxilFunc = hlslOP->GetOpFunc(opcode, accTy); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); SmallVector<Value *, 6> args; args.emplace_back(opArg); args.emplace_back(accArg); for (unsigned i = 0; i < vecSize; i++) args.emplace_back(Builder.CreateExtractElement(src0, i)); for (unsigned i = 0; i < vecSize; i++) args.emplace_back(Builder.CreateExtractElement(src1, i)); return Builder.CreateCall(dxilFunc, args); } Value *TranslateDot4AddPacked(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *src0 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); DXASSERT( !src0->getType()->isVectorTy() && src0->getType()->isIntegerTy(32), "otherwise, unexpected vector support in high level intrinsic template"); Value *src1 = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); DXASSERT(src0->getType() == src1->getType(), "otherwise, mismatched argument types"); Value *accArg = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); Type *accTy = accArg->getType(); DXASSERT( !accTy->isVectorTy() && accTy->isIntegerTy(32), "otherwise, unexpected vector support in high level intrinsic template"); IRBuilder<> Builder(CI); Function *dxilFunc = hlslOP->GetOpFunc(opcode, accTy); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); return Builder.CreateCall(dxilFunc, {opArg, accArg, src0, src1}); } Value *TranslatePack(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, 
bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *val = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Type *valTy = val->getType(); Type *eltTy = valTy->getScalarType(); DXASSERT(valTy->isVectorTy() && valTy->getVectorNumElements() == 4 && eltTy->isIntegerTy() && (eltTy->getIntegerBitWidth() == 32 || eltTy->getIntegerBitWidth() == 16), "otherwise, unexpected input dimension or component type"); DXIL::PackMode packMode = DXIL::PackMode::Trunc; switch (IOP) { case hlsl::IntrinsicOp::IOP_pack_clamp_s8: packMode = DXIL::PackMode::SClamp; break; case hlsl::IntrinsicOp::IOP_pack_clamp_u8: packMode = DXIL::PackMode::UClamp; break; case hlsl::IntrinsicOp::IOP_pack_s8: case hlsl::IntrinsicOp::IOP_pack_u8: packMode = DXIL::PackMode::Trunc; break; default: DXASSERT(false, "unexpected opcode"); break; } IRBuilder<> Builder(CI); Function *dxilFunc = hlslOP->GetOpFunc(opcode, eltTy); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Constant *packModeArg = hlslOP->GetU8Const((unsigned)packMode); Value *elt0 = Builder.CreateExtractElement(val, (uint64_t)0); Value *elt1 = Builder.CreateExtractElement(val, (uint64_t)1); Value *elt2 = Builder.CreateExtractElement(val, (uint64_t)2); Value *elt3 = Builder.CreateExtractElement(val, (uint64_t)3); return Builder.CreateCall(dxilFunc, {opArg, packModeArg, elt0, elt1, elt2, elt3}); } Value *TranslateUnpack(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP *hlslOP = &helper.hlslOP; Value *packedVal = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); DXASSERT( !packedVal->getType()->isVectorTy() && packedVal->getType()->isIntegerTy(32), "otherwise, unexpected vector support in high level intrinsic template"); Type *overloadType = nullptr; DXIL::UnpackMode unpackMode = DXIL::UnpackMode::Unsigned; switch (IOP) { case hlsl::IntrinsicOp::IOP_unpack_s8s32: unpackMode = DXIL::UnpackMode::Signed; overloadType = helper.i32Ty; break; case hlsl::IntrinsicOp::IOP_unpack_u8u32: unpackMode = DXIL::UnpackMode::Unsigned; overloadType = helper.i32Ty; break; case hlsl::IntrinsicOp::IOP_unpack_s8s16: unpackMode = DXIL::UnpackMode::Signed; overloadType = helper.i16Ty; break; case hlsl::IntrinsicOp::IOP_unpack_u8u16: unpackMode = DXIL::UnpackMode::Unsigned; overloadType = helper.i16Ty; break; default: DXASSERT(false, "unexpected opcode"); break; } IRBuilder<> Builder(CI); Function *dxilFunc = hlslOP->GetOpFunc(opcode, overloadType); Constant *opArg = hlslOP->GetU32Const((unsigned)opcode); Constant *unpackModeArg = hlslOP->GetU8Const((unsigned)unpackMode); Value *Res = Builder.CreateCall(dxilFunc, {opArg, unpackModeArg, packedVal}); // Convert the final aggregate into a vector to make the types match const unsigned vecSize = 4; Value *ResVec = UndefValue::get(CI->getType()); for (unsigned i = 0; i < vecSize; ++i) { Value *Elt = Builder.CreateExtractValue(Res, i); ResVec = Builder.CreateInsertElement(ResVec, Elt, i); } return ResVec; } } // namespace // Resource Handle. 
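/*
  A sketch of the mapping this implements (the dynamic-resources HLSL syntax
  is an assumption here; value names are illustrative):
  HLSL: Texture2D tex = ResourceDescriptorHeap[heapIndex];
  DXIL: %dx.types.Handle @dx.op.createHandleFromHeap(i32 %Opcode, i32 %Index,
            i1 %SamplerHeap, i1 %NonUniformIndex)
*/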
namespace { Value *TranslateGetHandleFromHeap(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { hlsl::OP &hlslOP = helper.hlslOP; Function *dxilFunc = hlslOP.GetOpFunc(opcode, helper.voidTy); IRBuilder<> Builder(CI); Value *opArg = ConstantInt::get(helper.i32Ty, (unsigned)opcode); return Builder.CreateCall( dxilFunc, {opArg, CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx), CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx), // TODO: update nonUniformIndex later. Builder.getInt1(false)}); } } // namespace // Translate and/or/select intrinsics namespace { Value *TranslateAnd(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *y = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Type *Ty = CI->getType(); Type *EltTy = Ty->getScalarType(); IRBuilder<> Builder(CI); if (Ty != EltTy) { Value *Result = UndefValue::get(Ty); for (unsigned i = 0; i < Ty->getVectorNumElements(); i++) { Value *EltX = Builder.CreateExtractElement(x, i); Value *EltY = Builder.CreateExtractElement(y, i); Value *tmp = Builder.CreateAnd(EltX, EltY); Result = Builder.CreateInsertElement(Result, tmp, i); } return Result; } return Builder.CreateAnd(x, y); } Value *TranslateOr(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *x = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc0Idx); Value *y = CI->getArgOperand(HLOperandIndex::kBinaryOpSrc1Idx); Type *Ty = CI->getType(); Type *EltTy = Ty->getScalarType(); IRBuilder<> Builder(CI); if (Ty != EltTy) { Value *Result = UndefValue::get(Ty); for (unsigned i = 0; i < Ty->getVectorNumElements(); i++) { Value *EltX = Builder.CreateExtractElement(x, i); Value *EltY = Builder.CreateExtractElement(y, i); Value *tmp = Builder.CreateOr(EltX, EltY); Result = Builder.CreateInsertElement(Result, tmp, i); } return Result; } return Builder.CreateOr(x, y); } Value *TranslateSelect(CallInst *CI, IntrinsicOp IOP, OP::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *cond = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc0Idx); Value *t = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc1Idx); Value *f = CI->getArgOperand(HLOperandIndex::kTrinaryOpSrc2Idx); Type *Ty = CI->getType(); Type *EltTy = Ty->getScalarType(); IRBuilder<> Builder(CI); if (Ty != EltTy) { Value *Result = UndefValue::get(Ty); for (unsigned i = 0; i < Ty->getVectorNumElements(); i++) { Value *EltCond = Builder.CreateExtractElement(cond, i); Value *EltTrue = Builder.CreateExtractElement(t, i); Value *EltFalse = Builder.CreateExtractElement(f, i); Value *tmp = Builder.CreateSelect(EltCond, EltTrue, EltFalse); Result = Builder.CreateInsertElement(Result, tmp, i); } return Result; } return Builder.CreateSelect(cond, t, f); } } // namespace // Lower table. 
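// Each entry in the table below pairs an HLSL intrinsic with its lowering
// routine and, where the mapping is one-to-one, the DXIL opcode it lowers to;
// intrinsics that expand to an instruction sequence instead carry
// DXIL::OpCode::NumOpCodes as a placeholder. The static_assert following the
// table enforces that it stays in sync with IntrinsicOp.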
namespace { Value *EmptyLower(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Translated = false; dxilutil::EmitErrorOnInstruction(CI, "Unsupported intrinsic."); return nullptr; } // SPIRV change starts #ifdef ENABLE_SPIRV_CODEGEN Value *UnsupportedVulkanIntrinsic(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Translated = false; dxilutil::EmitErrorOnInstruction(CI, "Unsupported Vulkan intrinsic."); return nullptr; } #endif // ENABLE_SPIRV_CODEGEN // SPIRV change ends Value *StreamOutputLower(CallInst *CI, IntrinsicOp IOP, DXIL::OpCode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { // Translated in DxilGenerationPass::GenerateStreamOutputOperation. // Do nothing here. // Mark not translated. Translated = false; return nullptr; } // This table has to match IntrinsicOp orders IntrinsicLower gLowerTable[] = { {IntrinsicOp::IOP_AcceptHitAndEndSearch, TranslateNoArgNoReturnPreserveOutput, DXIL::OpCode::AcceptHitAndEndSearch}, {IntrinsicOp::IOP_AddUint64, TranslateAddUint64, DXIL::OpCode::UAddc}, {IntrinsicOp::IOP_AllMemoryBarrier, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_AllMemoryBarrierWithGroupSync, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_AllocateRayQuery, TranslateAllocateRayQuery, DXIL::OpCode::AllocateRayQuery}, {IntrinsicOp::IOP_Barrier, TranslateBarrier, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_CallShader, TranslateCallShader, DXIL::OpCode::CallShader}, {IntrinsicOp::IOP_CheckAccessFullyMapped, TranslateCheckAccess, DXIL::OpCode::CheckAccessFullyMapped}, {IntrinsicOp::IOP_CreateResourceFromHeap, TranslateGetHandleFromHeap, DXIL::OpCode::CreateHandleFromHeap}, {IntrinsicOp::IOP_D3DCOLORtoUBYTE4, TranslateD3DColorToUByte4, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_DeviceMemoryBarrier, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_DeviceMemoryBarrierWithGroupSync, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_DispatchMesh, TrivialDispatchMesh, DXIL::OpCode::DispatchMesh}, {IntrinsicOp::IOP_DispatchRaysDimensions, TranslateNoArgVectorOperation, DXIL::OpCode::DispatchRaysDimensions}, {IntrinsicOp::IOP_DispatchRaysIndex, TranslateNoArgVectorOperation, DXIL::OpCode::DispatchRaysIndex}, {IntrinsicOp::IOP_EvaluateAttributeAtSample, TranslateEvalSample, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_EvaluateAttributeCentroid, TranslateEvalCentroid, DXIL::OpCode::EvalCentroid}, {IntrinsicOp::IOP_EvaluateAttributeSnapped, TranslateEvalSnapped, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_GeometryIndex, TrivialNoArgWithRetOperation, DXIL::OpCode::GeometryIndex}, {IntrinsicOp::IOP_GetAttributeAtVertex, TranslateGetAttributeAtVertex, DXIL::OpCode::AttributeAtVertex}, {IntrinsicOp::IOP_GetRemainingRecursionLevels, TrivialNoArgOperation, DXIL::OpCode::GetRemainingRecursionLevels}, {IntrinsicOp::IOP_GetRenderTargetSampleCount, TrivialNoArgOperation, DXIL::OpCode::RenderTargetGetSampleCount}, {IntrinsicOp::IOP_GetRenderTargetSamplePosition, TranslateGetRTSamplePos, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_GroupMemoryBarrier, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_GroupMemoryBarrierWithGroupSync, TrivialBarrier, DXIL::OpCode::Barrier}, {IntrinsicOp::IOP_HitKind, TrivialNoArgWithRetOperation, DXIL::OpCode::HitKind}, {IntrinsicOp::IOP_IgnoreHit, 
TranslateNoArgNoReturnPreserveOutput, DXIL::OpCode::IgnoreHit}, {IntrinsicOp::IOP_InstanceID, TrivialNoArgWithRetOperation, DXIL::OpCode::InstanceID}, {IntrinsicOp::IOP_InstanceIndex, TrivialNoArgWithRetOperation, DXIL::OpCode::InstanceIndex}, {IntrinsicOp::IOP_InterlockedAdd, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedAnd, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedCompareExchange, TranslateIopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedCompareExchangeFloatBitwise, TranslateIopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedCompareStore, TranslateIopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedCompareStoreFloatBitwise, TranslateIopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedExchange, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedMax, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedMin, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedOr, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_InterlockedXor, TranslateIopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_IsHelperLane, TrivialNoArgWithRetOperation, DXIL::OpCode::IsHelperLane}, {IntrinsicOp::IOP_NonUniformResourceIndex, TranslateNonUniformResourceIndex, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ObjectRayDirection, TranslateNoArgVectorOperation, DXIL::OpCode::ObjectRayDirection}, {IntrinsicOp::IOP_ObjectRayOrigin, TranslateNoArgVectorOperation, DXIL::OpCode::ObjectRayOrigin}, {IntrinsicOp::IOP_ObjectToWorld, TranslateNoArgMatrix3x4Operation, DXIL::OpCode::ObjectToWorld}, {IntrinsicOp::IOP_ObjectToWorld3x4, TranslateNoArgMatrix3x4Operation, DXIL::OpCode::ObjectToWorld}, {IntrinsicOp::IOP_ObjectToWorld4x3, TranslateNoArgTransposedMatrix3x4Operation, DXIL::OpCode::ObjectToWorld}, {IntrinsicOp::IOP_PrimitiveIndex, TrivialNoArgWithRetOperation, DXIL::OpCode::PrimitiveIndex}, {IntrinsicOp::IOP_Process2DQuadTessFactorsAvg, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_Process2DQuadTessFactorsMax, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_Process2DQuadTessFactorsMin, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessIsolineTessFactors, TranslateProcessIsolineTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessQuadTessFactorsAvg, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessQuadTessFactorsMax, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessQuadTessFactorsMin, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessTriTessFactorsAvg, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessTriTessFactorsMax, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ProcessTriTessFactorsMin, TranslateProcessTessFactors, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_QuadAll, TranslateQuadAnyAll, DXIL::OpCode::QuadVote}, {IntrinsicOp::IOP_QuadAny, TranslateQuadAnyAll, DXIL::OpCode::QuadVote}, {IntrinsicOp::IOP_QuadReadAcrossDiagonal, TranslateQuadReadAcross, DXIL::OpCode::QuadOp}, {IntrinsicOp::IOP_QuadReadAcrossX, TranslateQuadReadAcross, DXIL::OpCode::QuadOp}, {IntrinsicOp::IOP_QuadReadAcrossY, TranslateQuadReadAcross, 
DXIL::OpCode::QuadOp}, {IntrinsicOp::IOP_QuadReadLaneAt, TranslateQuadReadLaneAt, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_RayFlags, TrivialNoArgWithRetOperation, DXIL::OpCode::RayFlags}, {IntrinsicOp::IOP_RayTCurrent, TrivialNoArgWithRetOperation, DXIL::OpCode::RayTCurrent}, {IntrinsicOp::IOP_RayTMin, TrivialNoArgWithRetOperation, DXIL::OpCode::RayTMin}, {IntrinsicOp::IOP_ReportHit, TranslateReportIntersection, DXIL::OpCode::ReportHit}, {IntrinsicOp::IOP_SetMeshOutputCounts, TrivialSetMeshOutputCounts, DXIL::OpCode::SetMeshOutputCounts}, {IntrinsicOp::IOP_TraceRay, TranslateTraceRay, DXIL::OpCode::TraceRay}, {IntrinsicOp::IOP_WaveActiveAllEqual, TranslateWaveAllEqual, DXIL::OpCode::WaveActiveAllEqual}, {IntrinsicOp::IOP_WaveActiveAllTrue, TranslateWaveA2B, DXIL::OpCode::WaveAllTrue}, {IntrinsicOp::IOP_WaveActiveAnyTrue, TranslateWaveA2B, DXIL::OpCode::WaveAnyTrue}, {IntrinsicOp::IOP_WaveActiveBallot, TranslateWaveBallot, DXIL::OpCode::WaveActiveBallot}, {IntrinsicOp::IOP_WaveActiveBitAnd, TranslateWaveA2A, DXIL::OpCode::WaveActiveBit}, {IntrinsicOp::IOP_WaveActiveBitOr, TranslateWaveA2A, DXIL::OpCode::WaveActiveBit}, {IntrinsicOp::IOP_WaveActiveBitXor, TranslateWaveA2A, DXIL::OpCode::WaveActiveBit}, {IntrinsicOp::IOP_WaveActiveCountBits, TranslateWaveA2B, DXIL::OpCode::WaveAllBitCount}, {IntrinsicOp::IOP_WaveActiveMax, TranslateWaveA2A, DXIL::OpCode::WaveActiveOp}, {IntrinsicOp::IOP_WaveActiveMin, TranslateWaveA2A, DXIL::OpCode::WaveActiveOp}, {IntrinsicOp::IOP_WaveActiveProduct, TranslateWaveA2A, DXIL::OpCode::WaveActiveOp}, {IntrinsicOp::IOP_WaveActiveSum, TranslateWaveA2A, DXIL::OpCode::WaveActiveOp}, {IntrinsicOp::IOP_WaveGetLaneCount, TranslateWaveToVal, DXIL::OpCode::WaveGetLaneCount}, {IntrinsicOp::IOP_WaveGetLaneIndex, TranslateWaveToVal, DXIL::OpCode::WaveGetLaneIndex}, {IntrinsicOp::IOP_WaveIsFirstLane, TranslateWaveToVal, DXIL::OpCode::WaveIsFirstLane}, {IntrinsicOp::IOP_WaveMatch, TranslateWaveMatch, DXIL::OpCode::WaveMatch}, {IntrinsicOp::IOP_WaveMultiPrefixBitAnd, TranslateWaveMultiPrefix, DXIL::OpCode::WaveMultiPrefixOp}, {IntrinsicOp::IOP_WaveMultiPrefixBitOr, TranslateWaveMultiPrefix, DXIL::OpCode::WaveMultiPrefixOp}, {IntrinsicOp::IOP_WaveMultiPrefixBitXor, TranslateWaveMultiPrefix, DXIL::OpCode::WaveMultiPrefixOp}, {IntrinsicOp::IOP_WaveMultiPrefixCountBits, TranslateWaveMultiPrefixBitCount, DXIL::OpCode::WaveMultiPrefixBitCount}, {IntrinsicOp::IOP_WaveMultiPrefixProduct, TranslateWaveMultiPrefix, DXIL::OpCode::WaveMultiPrefixOp}, {IntrinsicOp::IOP_WaveMultiPrefixSum, TranslateWaveMultiPrefix, DXIL::OpCode::WaveMultiPrefixOp}, {IntrinsicOp::IOP_WavePrefixCountBits, TranslateWaveA2B, DXIL::OpCode::WavePrefixBitCount}, {IntrinsicOp::IOP_WavePrefixProduct, TranslateWaveA2A, DXIL::OpCode::WavePrefixOp}, {IntrinsicOp::IOP_WavePrefixSum, TranslateWaveA2A, DXIL::OpCode::WavePrefixOp}, {IntrinsicOp::IOP_WaveReadLaneAt, TranslateWaveReadLaneAt, DXIL::OpCode::WaveReadLaneAt}, {IntrinsicOp::IOP_WaveReadLaneFirst, TranslateWaveReadLaneFirst, DXIL::OpCode::WaveReadLaneFirst}, {IntrinsicOp::IOP_WorldRayDirection, TranslateNoArgVectorOperation, DXIL::OpCode::WorldRayDirection}, {IntrinsicOp::IOP_WorldRayOrigin, TranslateNoArgVectorOperation, DXIL::OpCode::WorldRayOrigin}, {IntrinsicOp::IOP_WorldToObject, TranslateNoArgMatrix3x4Operation, DXIL::OpCode::WorldToObject}, {IntrinsicOp::IOP_WorldToObject3x4, TranslateNoArgMatrix3x4Operation, DXIL::OpCode::WorldToObject}, {IntrinsicOp::IOP_WorldToObject4x3, TranslateNoArgTransposedMatrix3x4Operation, DXIL::OpCode::WorldToObject}, 
{IntrinsicOp::IOP_abort, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_abs, TranslateAbs, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_acos, TrivialUnaryOperation, DXIL::OpCode::Acos}, {IntrinsicOp::IOP_all, TranslateAll, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_and, TranslateAnd, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_any, TranslateAny, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_asdouble, TranslateAsDouble, DXIL::OpCode::MakeDouble}, {IntrinsicOp::IOP_asfloat, TranslateBitcast, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_asfloat16, TranslateBitcast, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_asin, TrivialUnaryOperation, DXIL::OpCode::Asin}, {IntrinsicOp::IOP_asint, TranslateBitcast, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_asint16, TranslateBitcast, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_asuint, TranslateAsUint, DXIL::OpCode::SplitDouble}, {IntrinsicOp::IOP_asuint16, TranslateAsUint, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_atan, TrivialUnaryOperation, DXIL::OpCode::Atan}, {IntrinsicOp::IOP_atan2, TranslateAtan2, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ceil, TrivialUnaryOperation, DXIL::OpCode::Round_pi}, {IntrinsicOp::IOP_clamp, TranslateClamp, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_clip, TranslateClip, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_cos, TrivialUnaryOperation, DXIL::OpCode::Cos}, {IntrinsicOp::IOP_cosh, TrivialUnaryOperation, DXIL::OpCode::Hcos}, {IntrinsicOp::IOP_countbits, TrivialUnaryOperation, DXIL::OpCode::Countbits}, {IntrinsicOp::IOP_cross, TranslateCross, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_ddx, TrivialUnaryOperation, DXIL::OpCode::DerivCoarseX}, {IntrinsicOp::IOP_ddx_coarse, TrivialUnaryOperation, DXIL::OpCode::DerivCoarseX}, {IntrinsicOp::IOP_ddx_fine, TrivialUnaryOperation, DXIL::OpCode::DerivFineX}, {IntrinsicOp::IOP_ddy, TrivialUnaryOperation, DXIL::OpCode::DerivCoarseY}, {IntrinsicOp::IOP_ddy_coarse, TrivialUnaryOperation, DXIL::OpCode::DerivCoarseY}, {IntrinsicOp::IOP_ddy_fine, TrivialUnaryOperation, DXIL::OpCode::DerivFineY}, {IntrinsicOp::IOP_degrees, TranslateDegrees, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_determinant, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_distance, TranslateDistance, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_dot, TranslateDot, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_dot2add, TranslateDot2Add, DXIL::OpCode::Dot2AddHalf}, {IntrinsicOp::IOP_dot4add_i8packed, TranslateDot4AddPacked, DXIL::OpCode::Dot4AddI8Packed}, {IntrinsicOp::IOP_dot4add_u8packed, TranslateDot4AddPacked, DXIL::OpCode::Dot4AddU8Packed}, {IntrinsicOp::IOP_dst, TranslateDst, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_exp, TranslateExp, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_exp2, TrivialUnaryOperation, DXIL::OpCode::Exp}, {IntrinsicOp::IOP_f16tof32, TranslateF16ToF32, DXIL::OpCode::LegacyF16ToF32}, {IntrinsicOp::IOP_f32tof16, TranslateF32ToF16, DXIL::OpCode::LegacyF32ToF16}, {IntrinsicOp::IOP_faceforward, TranslateFaceforward, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_firstbithigh, TranslateFirstbitHi, DXIL::OpCode::FirstbitSHi}, {IntrinsicOp::IOP_firstbitlow, TranslateFirstbitLo, DXIL::OpCode::FirstbitLo}, {IntrinsicOp::IOP_floor, TrivialUnaryOperation, DXIL::OpCode::Round_ni}, {IntrinsicOp::IOP_fma, TrivialTrinaryOperation, DXIL::OpCode::Fma}, {IntrinsicOp::IOP_fmod, TranslateFMod, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_frac, TrivialUnaryOperation, DXIL::OpCode::Frc}, {IntrinsicOp::IOP_frexp, TranslateFrexp, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_fwidth, 
TranslateFWidth, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_isfinite, TrivialIsSpecialFloat, DXIL::OpCode::IsFinite}, {IntrinsicOp::IOP_isinf, TrivialIsSpecialFloat, DXIL::OpCode::IsInf}, {IntrinsicOp::IOP_isnan, TrivialIsSpecialFloat, DXIL::OpCode::IsNaN}, {IntrinsicOp::IOP_ldexp, TranslateLdExp, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_length, TranslateLength, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_lerp, TranslateLerp, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_lit, TranslateLit, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_log, TranslateLog, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_log10, TranslateLog10, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_log2, TrivialUnaryOperation, DXIL::OpCode::Log}, {IntrinsicOp::IOP_mad, TranslateFUITrinary, DXIL::OpCode::IMad}, {IntrinsicOp::IOP_max, TranslateFUIBinary, DXIL::OpCode::IMax}, {IntrinsicOp::IOP_min, TranslateFUIBinary, DXIL::OpCode::IMin}, {IntrinsicOp::IOP_modf, TranslateModF, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_msad4, TranslateMSad4, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_mul, TranslateMul, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_normalize, TranslateNormalize, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_or, TranslateOr, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_pack_clamp_s8, TranslatePack, DXIL::OpCode::Pack4x8}, {IntrinsicOp::IOP_pack_clamp_u8, TranslatePack, DXIL::OpCode::Pack4x8}, {IntrinsicOp::IOP_pack_s8, TranslatePack, DXIL::OpCode::Pack4x8}, {IntrinsicOp::IOP_pack_u8, TranslatePack, DXIL::OpCode::Pack4x8}, {IntrinsicOp::IOP_pow, TranslatePow, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_printf, TranslatePrintf, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_radians, TranslateRadians, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_rcp, TranslateRCP, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_reflect, TranslateReflect, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_refract, TranslateRefract, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_reversebits, TrivialUnaryOperation, DXIL::OpCode::Bfrev}, {IntrinsicOp::IOP_round, TrivialUnaryOperation, DXIL::OpCode::Round_ne}, {IntrinsicOp::IOP_rsqrt, TrivialUnaryOperation, DXIL::OpCode::Rsqrt}, {IntrinsicOp::IOP_saturate, TrivialUnaryOperation, DXIL::OpCode::Saturate}, {IntrinsicOp::IOP_select, TranslateSelect, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_sign, TranslateSign, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_sin, TrivialUnaryOperation, DXIL::OpCode::Sin}, {IntrinsicOp::IOP_sincos, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_sinh, TrivialUnaryOperation, DXIL::OpCode::Hsin}, {IntrinsicOp::IOP_smoothstep, TranslateSmoothStep, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_source_mark, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_sqrt, TrivialUnaryOperation, DXIL::OpCode::Sqrt}, {IntrinsicOp::IOP_step, TranslateStep, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tan, TrivialUnaryOperation, DXIL::OpCode::Tan}, {IntrinsicOp::IOP_tanh, TrivialUnaryOperation, DXIL::OpCode::Htan}, {IntrinsicOp::IOP_tex1D, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex1Dbias, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex1Dgrad, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex1Dlod, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex1Dproj, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex2D, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex2Dbias, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex2Dgrad, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex2Dlod, 
EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex2Dproj, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex3D, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex3Dbias, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex3Dgrad, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex3Dlod, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_tex3Dproj, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_texCUBE, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_texCUBEbias, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_texCUBEgrad, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_texCUBElod, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_texCUBEproj, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_transpose, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_trunc, TrivialUnaryOperation, DXIL::OpCode::Round_z}, {IntrinsicOp::IOP_unpack_s8s16, TranslateUnpack, DXIL::OpCode::Unpack4x8}, {IntrinsicOp::IOP_unpack_s8s32, TranslateUnpack, DXIL::OpCode::Unpack4x8}, {IntrinsicOp::IOP_unpack_u8u16, TranslateUnpack, DXIL::OpCode::Unpack4x8}, {IntrinsicOp::IOP_unpack_u8u32, TranslateUnpack, DXIL::OpCode::Unpack4x8}, #ifdef ENABLE_SPIRV_CODEGEN {IntrinsicOp::IOP_VkRawBufferLoad, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_VkRawBufferStore, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_VkReadClock, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_Vkext_execution_mode, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::IOP_Vkext_execution_mode_id, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, #endif // ENABLE_SPIRV_CODEGEN {IntrinsicOp::MOP_Append, StreamOutputLower, DXIL::OpCode::EmitStream}, {IntrinsicOp::MOP_RestartStrip, StreamOutputLower, DXIL::OpCode::CutStream}, {IntrinsicOp::MOP_CalculateLevelOfDetail, TranslateCalculateLOD, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_CalculateLevelOfDetailUnclamped, TranslateCalculateLOD, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_GetDimensions, TranslateGetDimensions, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Load, TranslateResourceLoad, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Sample, TranslateSample, DXIL::OpCode::Sample}, {IntrinsicOp::MOP_SampleBias, TranslateSample, DXIL::OpCode::SampleBias}, {IntrinsicOp::MOP_SampleCmp, TranslateSample, DXIL::OpCode::SampleCmp}, {IntrinsicOp::MOP_SampleCmpBias, TranslateSample, DXIL::OpCode::SampleCmpBias}, {IntrinsicOp::MOP_SampleCmpGrad, TranslateSample, DXIL::OpCode::SampleCmpGrad}, {IntrinsicOp::MOP_SampleCmpLevel, TranslateSample, DXIL::OpCode::SampleCmpLevel}, {IntrinsicOp::MOP_SampleCmpLevelZero, TranslateSample, DXIL::OpCode::SampleCmpLevelZero}, {IntrinsicOp::MOP_SampleGrad, TranslateSample, DXIL::OpCode::SampleGrad}, {IntrinsicOp::MOP_SampleLevel, TranslateSample, DXIL::OpCode::SampleLevel}, {IntrinsicOp::MOP_Gather, TranslateGather, DXIL::OpCode::TextureGather}, {IntrinsicOp::MOP_GatherAlpha, TranslateGather, DXIL::OpCode::TextureGather}, {IntrinsicOp::MOP_GatherBlue, TranslateGather, DXIL::OpCode::TextureGather}, {IntrinsicOp::MOP_GatherCmp, TranslateGather, DXIL::OpCode::TextureGatherCmp}, {IntrinsicOp::MOP_GatherCmpAlpha, TranslateGather, DXIL::OpCode::TextureGatherCmp}, {IntrinsicOp::MOP_GatherCmpBlue, TranslateGather, DXIL::OpCode::TextureGatherCmp}, {IntrinsicOp::MOP_GatherCmpGreen, TranslateGather, DXIL::OpCode::TextureGatherCmp}, {IntrinsicOp::MOP_GatherCmpRed, TranslateGather, 
DXIL::OpCode::TextureGatherCmp}, {IntrinsicOp::MOP_GatherGreen, TranslateGather, DXIL::OpCode::TextureGather}, {IntrinsicOp::MOP_GatherRaw, TranslateGather, DXIL::OpCode::TextureGatherRaw}, {IntrinsicOp::MOP_GatherRed, TranslateGather, DXIL::OpCode::TextureGather}, {IntrinsicOp::MOP_GetSamplePosition, TranslateGetSamplePosition, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Load2, TranslateResourceLoad, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Load3, TranslateResourceLoad, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Load4, TranslateResourceLoad, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedAdd, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedAdd64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedAnd, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedAnd64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareExchange, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareExchange64, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareExchangeFloatBitwise, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareStore, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareStore64, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedCompareStoreFloatBitwise, TranslateMopAtomicCmpXChg, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedExchange, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedExchange64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedExchangeFloat, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedMax, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedMax64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedMin, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedMin64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedOr, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedOr64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedXor, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_InterlockedXor64, TranslateMopAtomicBinaryOperation, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Store, TranslateResourceStore, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Store2, TranslateResourceStore, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Store3, TranslateResourceStore, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Store4, TranslateResourceStore, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_DecrementCounter, GenerateUpdateCounter, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_IncrementCounter, GenerateUpdateCounter, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_Consume, EmptyLower, DXIL::OpCode::NumOpCodes}, {IntrinsicOp::MOP_WriteSamplerFeedback, TranslateWriteSamplerFeedback, DXIL::OpCode::WriteSamplerFeedback}, {IntrinsicOp::MOP_WriteSamplerFeedbackBias, TranslateWriteSamplerFeedback, DXIL::OpCode::WriteSamplerFeedbackBias}, {IntrinsicOp::MOP_WriteSamplerFeedbackGrad, TranslateWriteSamplerFeedback, 
DXIL::OpCode::WriteSamplerFeedbackGrad}, {IntrinsicOp::MOP_WriteSamplerFeedbackLevel, TranslateWriteSamplerFeedback, DXIL::OpCode::WriteSamplerFeedbackLevel}, {IntrinsicOp::MOP_Abort, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_Abort}, {IntrinsicOp::MOP_CandidateGeometryIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateGeometryIndex}, {IntrinsicOp::MOP_CandidateInstanceContributionToHitGroupIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateInstanceContributionToHitGroupIndex}, {IntrinsicOp::MOP_CandidateInstanceID, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateInstanceID}, {IntrinsicOp::MOP_CandidateInstanceIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateInstanceIndex}, {IntrinsicOp::MOP_CandidateObjectRayDirection, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_CandidateObjectRayDirection}, {IntrinsicOp::MOP_CandidateObjectRayOrigin, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_CandidateObjectRayOrigin}, {IntrinsicOp::MOP_CandidateObjectToWorld3x4, TranslateRayQueryMatrix3x4Operation, DXIL::OpCode::RayQuery_CandidateObjectToWorld3x4}, {IntrinsicOp::MOP_CandidateObjectToWorld4x3, TranslateRayQueryTransposedMatrix3x4Operation, DXIL::OpCode::RayQuery_CandidateObjectToWorld3x4}, {IntrinsicOp::MOP_CandidatePrimitiveIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidatePrimitiveIndex}, {IntrinsicOp::MOP_CandidateProceduralPrimitiveNonOpaque, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateProceduralPrimitiveNonOpaque}, {IntrinsicOp::MOP_CandidateTriangleBarycentrics, TranslateRayQueryFloat2Getter, DXIL::OpCode::RayQuery_CandidateTriangleBarycentrics}, {IntrinsicOp::MOP_CandidateTriangleFrontFace, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateTriangleFrontFace}, {IntrinsicOp::MOP_CandidateTriangleRayT, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateTriangleRayT}, {IntrinsicOp::MOP_CandidateType, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CandidateType}, {IntrinsicOp::MOP_CandidateWorldToObject3x4, TranslateRayQueryMatrix3x4Operation, DXIL::OpCode::RayQuery_CandidateWorldToObject3x4}, {IntrinsicOp::MOP_CandidateWorldToObject4x3, TranslateRayQueryTransposedMatrix3x4Operation, DXIL::OpCode::RayQuery_CandidateWorldToObject3x4}, {IntrinsicOp::MOP_CommitNonOpaqueTriangleHit, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommitNonOpaqueTriangleHit}, {IntrinsicOp::MOP_CommitProceduralPrimitiveHit, TranslateCommitProceduralPrimitiveHit, DXIL::OpCode::RayQuery_CommitProceduralPrimitiveHit}, {IntrinsicOp::MOP_CommittedGeometryIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedGeometryIndex}, {IntrinsicOp::MOP_CommittedInstanceContributionToHitGroupIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedInstanceContributionToHitGroupIndex}, {IntrinsicOp::MOP_CommittedInstanceID, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedInstanceID}, {IntrinsicOp::MOP_CommittedInstanceIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedInstanceIndex}, {IntrinsicOp::MOP_CommittedObjectRayDirection, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_CommittedObjectRayDirection}, {IntrinsicOp::MOP_CommittedObjectRayOrigin, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_CommittedObjectRayOrigin}, {IntrinsicOp::MOP_CommittedObjectToWorld3x4, TranslateRayQueryMatrix3x4Operation, DXIL::OpCode::RayQuery_CommittedObjectToWorld3x4}, 
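    // Note: the 4x3 variants below reuse the 3x4 DXIL opcode; the transpose
    // is realized by the row/column index patterns in
    // TranslateRayQueryTransposedMatrix3x4Operation.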
{IntrinsicOp::MOP_CommittedObjectToWorld4x3, TranslateRayQueryTransposedMatrix3x4Operation, DXIL::OpCode::RayQuery_CommittedObjectToWorld3x4}, {IntrinsicOp::MOP_CommittedPrimitiveIndex, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedPrimitiveIndex}, {IntrinsicOp::MOP_CommittedRayT, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedRayT}, {IntrinsicOp::MOP_CommittedStatus, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedStatus}, {IntrinsicOp::MOP_CommittedTriangleBarycentrics, TranslateRayQueryFloat2Getter, DXIL::OpCode::RayQuery_CommittedTriangleBarycentrics}, {IntrinsicOp::MOP_CommittedTriangleFrontFace, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_CommittedTriangleFrontFace}, {IntrinsicOp::MOP_CommittedWorldToObject3x4, TranslateRayQueryMatrix3x4Operation, DXIL::OpCode::RayQuery_CommittedWorldToObject3x4}, {IntrinsicOp::MOP_CommittedWorldToObject4x3, TranslateRayQueryTransposedMatrix3x4Operation, DXIL::OpCode::RayQuery_CommittedWorldToObject3x4}, {IntrinsicOp::MOP_Proceed, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_Proceed}, {IntrinsicOp::MOP_RayFlags, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_RayFlags}, {IntrinsicOp::MOP_RayTMin, TranslateGenericRayQueryMethod, DXIL::OpCode::RayQuery_RayTMin}, {IntrinsicOp::MOP_TraceRayInline, TranslateTraceRayInline, DXIL::OpCode::RayQuery_TraceRayInline}, {IntrinsicOp::MOP_WorldRayDirection, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_WorldRayDirection}, {IntrinsicOp::MOP_WorldRayOrigin, TranslateRayQueryFloat3Getter, DXIL::OpCode::RayQuery_WorldRayOrigin}, {IntrinsicOp::MOP_Count, TranslateNodeGetInputRecordCount, DXIL::OpCode::GetInputRecordCount}, {IntrinsicOp::MOP_FinishedCrossGroupSharing, TranslateNodeFinishedCrossGroupSharing, DXIL::OpCode::FinishedCrossGroupSharing}, {IntrinsicOp::MOP_GetGroupNodeOutputRecords, TranslateGetGroupNodeOutputRecords, DXIL::OpCode::AllocateNodeOutputRecords}, {IntrinsicOp::MOP_GetThreadNodeOutputRecords, TranslateGetThreadNodeOutputRecords, DXIL::OpCode::AllocateNodeOutputRecords}, {IntrinsicOp::MOP_IsValid, TranslateNodeOutputIsValid, DXIL::OpCode::NodeOutputIsValid}, {IntrinsicOp::MOP_GroupIncrementOutputCount, TranslateNodeGroupIncrementOutputCount, DXIL::OpCode::IncrementOutputCount}, {IntrinsicOp::MOP_ThreadIncrementOutputCount, TranslateNodeThreadIncrementOutputCount, DXIL::OpCode::IncrementOutputCount}, {IntrinsicOp::MOP_OutputComplete, TranslateNodeOutputComplete, DXIL::OpCode::OutputComplete}, // SPIRV change starts #ifdef ENABLE_SPIRV_CODEGEN {IntrinsicOp::MOP_SubpassLoad, UnsupportedVulkanIntrinsic, DXIL::OpCode::NumOpCodes}, #endif // ENABLE_SPIRV_CODEGEN // SPIRV change ends // Manually added part. 
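    // These unsigned variants are resolved from the argument types during
    // codegen; they share the lowering routines of their signed counterparts.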
    {IntrinsicOp::IOP_InterlockedUMax, TranslateIopAtomicBinaryOperation,
     DXIL::OpCode::NumOpCodes},
    {IntrinsicOp::IOP_InterlockedUMin, TranslateIopAtomicBinaryOperation,
     DXIL::OpCode::NumOpCodes},
    {IntrinsicOp::IOP_WaveActiveUMax, TranslateWaveA2A,
     DXIL::OpCode::WaveActiveOp},
    {IntrinsicOp::IOP_WaveActiveUMin, TranslateWaveA2A,
     DXIL::OpCode::WaveActiveOp},
    {IntrinsicOp::IOP_WaveActiveUProduct, TranslateWaveA2A,
     DXIL::OpCode::WaveActiveOp},
    {IntrinsicOp::IOP_WaveActiveUSum, TranslateWaveA2A,
     DXIL::OpCode::WaveActiveOp},
    {IntrinsicOp::IOP_WaveMultiPrefixUProduct, TranslateWaveMultiPrefix,
     DXIL::OpCode::WaveMultiPrefixOp},
    {IntrinsicOp::IOP_WaveMultiPrefixUSum, TranslateWaveMultiPrefix,
     DXIL::OpCode::WaveMultiPrefixOp},
    {IntrinsicOp::IOP_WavePrefixUProduct, TranslateWaveA2A,
     DXIL::OpCode::WavePrefixOp},
    {IntrinsicOp::IOP_WavePrefixUSum, TranslateWaveA2A,
     DXIL::OpCode::WavePrefixOp},
    {IntrinsicOp::IOP_uabs, TranslateUAbs, DXIL::OpCode::NumOpCodes},
    {IntrinsicOp::IOP_uclamp, TranslateClamp, DXIL::OpCode::NumOpCodes},
    {IntrinsicOp::IOP_ufirstbithigh, TranslateFirstbitHi,
     DXIL::OpCode::FirstbitHi},
    {IntrinsicOp::IOP_umad, TranslateFUITrinary, DXIL::OpCode::UMad},
    {IntrinsicOp::IOP_umax, TranslateFUIBinary, DXIL::OpCode::UMax},
    {IntrinsicOp::IOP_umin, TranslateFUIBinary, DXIL::OpCode::UMin},
    {IntrinsicOp::IOP_umul, TranslateMul, DXIL::OpCode::UMul},
    {IntrinsicOp::IOP_usign, TranslateUSign, DXIL::OpCode::UMax},
    {IntrinsicOp::MOP_InterlockedUMax, TranslateMopAtomicBinaryOperation,
     DXIL::OpCode::NumOpCodes},
    {IntrinsicOp::MOP_InterlockedUMin, TranslateMopAtomicBinaryOperation,
     DXIL::OpCode::NumOpCodes},
};
} // namespace

static_assert(
    sizeof(gLowerTable) / sizeof(gLowerTable[0]) ==
        static_cast<size_t>(IntrinsicOp::Num_Intrinsics),
    "Intrinsic lowering table must be updated to account for new intrinsics.");

static void TranslateBuiltinIntrinsic(CallInst *CI,
                                      HLOperationLowerHelper &helper,
                                      HLObjectOperationLowerHelper *pObjHelper,
                                      bool &Translated) {
  unsigned opcode = hlsl::GetHLOpcode(CI);
  const IntrinsicLower &lower = gLowerTable[opcode];
  Value *Result = lower.LowerFunc(CI, lower.IntriOpcode, lower.DxilOpcode,
                                  helper, pObjHelper, Translated);
  if (Result)
    CI->replaceAllUsesWith(Result);
}

// SharedMem.
namespace {

bool IsSharedMemPtr(Value *Ptr) {
  return Ptr->getType()->getPointerAddressSpace() == DXIL::kTGSMAddrSpace;
}

bool IsLocalVariablePtr(Value *Ptr) {
  while (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    Ptr = GEP->getPointerOperand();
  }
  bool isAlloca = isa<AllocaInst>(Ptr);
  if (isAlloca)
    return true;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
  if (!GV)
    return false;

  return GV->getLinkage() == GlobalValue::LinkageTypes::InternalLinkage;
}

} // namespace

// Constant buffer.
namespace {

unsigned GetEltTypeByteSizeForConstBuf(Type *EltType, const DataLayout &DL) {
  DXASSERT(EltType->isIntegerTy() || EltType->isFloatingPointTy(),
           "not an element type");
  // TODO: Use the real size once constant buffers are changed to a linear
  // layout.
  if (DL.getTypeSizeInBits(EltType) <= 32) {
    // Constant buffer elements are 4-byte aligned.
    return 4;
  } else
    return 8;
}

Value *GenerateCBLoad(Value *handle, Value *offset, Type *EltTy, OP *hlslOP,
                      IRBuilder<> &Builder) {
  Constant *OpArg = hlslOP->GetU32Const((unsigned)OP::OpCode::CBufferLoad);

  DXASSERT(!EltTy->isIntegerTy(1),
           "Bools should not be loaded as their register representation.");

  // Align to 8 bytes for now.
  Constant *align = hlslOP->GetU32Const(8);
  Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoad, EltTy);
  return Builder.CreateCall(CBLoad, {OpArg, handle, offset, align});
}
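/*
  A minimal sketch of what GenerateCBLoad emits for one scalar float at byte
  offset %off (value names illustrative; <op> stands for whatever integer
  opcode hlslOP assigns to CBufferLoad):
      %val = call float @dx.op.cbufferLoad.f32(i32 <op>, %dx.types.Handle %h,
                                               i32 %off, i32 8)
*/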
Value *TranslateConstBufMatLd(Type *matType, Value *handle, Value *offset,
                              bool colMajor, OP *OP, const DataLayout &DL,
                              IRBuilder<> &Builder) {
  HLMatrixType MatTy = HLMatrixType::cast(matType);
  Type *EltTy = MatTy.getElementTypeForMem();
  unsigned matSize = MatTy.getNumElements();
  std::vector<Value *> elts(matSize);
  Value *EltByteSize = ConstantInt::get(
      offset->getType(), GetEltTypeByteSizeForConstBuf(EltTy, DL));

  // TODO: Use the real size once constant buffers are changed to a linear
  // layout.
  Value *baseOffset = offset;
  for (unsigned i = 0; i < matSize; i++) {
    elts[i] = GenerateCBLoad(handle, baseOffset, EltTy, OP, Builder);
    baseOffset = Builder.CreateAdd(baseOffset, EltByteSize);
  }

  Value *Vec = HLMatrixLower::BuildVector(EltTy, elts, Builder);
  Vec = MatTy.emitLoweredMemToReg(Vec, Builder);
  return Vec;
}

void TranslateCBGep(GetElementPtrInst *GEP, Value *handle, Value *baseOffset,
                    hlsl::OP *hlslOP, IRBuilder<> &Builder,
                    DxilFieldAnnotation *prevFieldAnnotation,
                    const DataLayout &DL, DxilTypeSystem &dxilTypeSys,
                    HLObjectOperationLowerHelper *pObjHelper);

Value *GenerateVecEltFromGEP(Value *ldData, GetElementPtrInst *GEP,
                             IRBuilder<> &Builder, bool bInsertLdNextToGEP) {
  DXASSERT(GEP->getNumIndices() == 2, "must have 2 levels");
  Value *baseIdx = (GEP->idx_begin())->get();
  Value *zeroIdx = Builder.getInt32(0);
  DXASSERT_LOCALVAR(baseIdx && zeroIdx, baseIdx == zeroIdx,
                    "base index must be 0");
  Value *idx = (GEP->idx_begin() + 1)->get();
  if (dyn_cast<ConstantInt>(idx)) {
    return Builder.CreateExtractElement(ldData, idx);
  } else {
    // Dynamic indexing.
    // Copy vec to array.
    Type *Ty = ldData->getType();
    Type *EltTy = Ty->getVectorElementType();
    unsigned vecSize = Ty->getVectorNumElements();
    ArrayType *AT = ArrayType::get(EltTy, vecSize);
    IRBuilder<> AllocaBuilder(
        GEP->getParent()->getParent()->getEntryBlock().getFirstInsertionPt());
    Value *tempArray = AllocaBuilder.CreateAlloca(AT);
    Value *zero = Builder.getInt32(0);
    for (unsigned int i = 0; i < vecSize; i++) {
      Value *Elt = Builder.CreateExtractElement(ldData, Builder.getInt32(i));
      Value *Ptr =
          Builder.CreateInBoundsGEP(tempArray, {zero, Builder.getInt32(i)});
      Builder.CreateStore(Elt, Ptr);
    }
    // Load from temp array.
    if (bInsertLdNextToGEP) {
      // Insert the new GEP just before the old and to-be-deleted GEP.
      Builder.SetInsertPoint(GEP);
    }
    Value *EltGEP = Builder.CreateInBoundsGEP(tempArray, {zero, idx});
    return Builder.CreateLoad(EltGEP);
  }
}

void TranslateResourceInCB(LoadInst *LI,
                           HLObjectOperationLowerHelper *pObjHelper,
                           GlobalVariable *CbGV) {
  if (LI->user_empty()) {
    LI->eraseFromParent();
    return;
  }

  GetElementPtrInst *Ptr = cast<GetElementPtrInst>(LI->getPointerOperand());
  CallInst *CI = cast<CallInst>(LI->user_back());
  CallInst *Anno = cast<CallInst>(CI->user_back());
  DxilResourceProperties RP = pObjHelper->GetResPropsFromAnnotateHandle(Anno);
  Value *ResPtr = pObjHelper->GetOrCreateResourceForCbPtr(Ptr, CbGV, RP);

  // Lower Ptr to GV base Ptr.
  Value *GvPtr = pObjHelper->LowerCbResourcePtr(Ptr, ResPtr);
  IRBuilder<> Builder(LI);
  Value *GvLd = Builder.CreateLoad(GvPtr);
  LI->replaceAllUsesWith(GvLd);
  LI->eraseFromParent();
}
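// Walks one user of a cbuffer pointer: matrix loads and subscripts arrive as
// HL intrinsic calls, scalars and vectors as plain loads, and GEPs recurse
// through TranslateCBGep with an updated byte offset.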
void TranslateCBAddressUser(Instruction *user, Value *handle,
                            Value *baseOffset, hlsl::OP *hlslOP,
                            DxilFieldAnnotation *prevFieldAnnotation,
                            DxilTypeSystem &dxilTypeSys, const DataLayout &DL,
                            HLObjectOperationLowerHelper *pObjHelper) {
  IRBuilder<> Builder(user);
  if (CallInst *CI = dyn_cast<CallInst>(user)) {
    HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction());
    unsigned opcode = GetHLOpcode(CI);
    if (group == HLOpcodeGroup::HLMatLoadStore) {
      HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode);
      bool colMajor = matOp == HLMatLoadStoreOpcode::ColMatLoad;
      DXASSERT(matOp == HLMatLoadStoreOpcode::ColMatLoad ||
                   matOp == HLMatLoadStoreOpcode::RowMatLoad,
               "No store on cbuffer");
      Type *matType = CI->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx)
                          ->getType()
                          ->getPointerElementType();
      Value *newLd = TranslateConstBufMatLd(matType, handle, baseOffset,
                                            colMajor, hlslOP, DL, Builder);
      CI->replaceAllUsesWith(newLd);
      CI->eraseFromParent();
    } else if (group == HLOpcodeGroup::HLSubscript) {
      HLSubscriptOpcode subOp = static_cast<HLSubscriptOpcode>(opcode);
      Value *basePtr = CI->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx);
      HLMatrixType MatTy =
          HLMatrixType::cast(basePtr->getType()->getPointerElementType());
      Type *EltTy = MatTy.getElementTypeForReg();
      Value *EltByteSize = ConstantInt::get(
          baseOffset->getType(), GetEltTypeByteSizeForConstBuf(EltTy, DL));

      Value *idx = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx);

      Type *resultType = CI->getType()->getPointerElementType();
      unsigned resultSize = 1;
      if (resultType->isVectorTy())
        resultSize = resultType->getVectorNumElements();
      DXASSERT(resultSize <= 16, "up to 4x4 elements in vector or matrix");
      assert(resultSize <= 16);
      Value *idxList[16];

      switch (subOp) {
      case HLSubscriptOpcode::ColMatSubscript:
      case HLSubscriptOpcode::RowMatSubscript: {
        for (unsigned i = 0; i < resultSize; i++) {
          Value *idx =
              CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx + i);
          Value *offset = Builder.CreateMul(idx, EltByteSize);
          idxList[i] = Builder.CreateAdd(baseOffset, offset);
        }
      } break;
      case HLSubscriptOpcode::RowMatElement:
      case HLSubscriptOpcode::ColMatElement: {
        Constant *EltIdxs = cast<Constant>(idx);
        for (unsigned i = 0; i < resultSize; i++) {
          Value *offset =
              Builder.CreateMul(EltIdxs->getAggregateElement(i), EltByteSize);
          idxList[i] = Builder.CreateAdd(baseOffset, offset);
        }
      } break;
      default:
        DXASSERT(0, "invalid operation on const buffer");
        break;
      }

      Value *ldData = UndefValue::get(resultType);
      if (resultType->isVectorTy()) {
        for (unsigned i = 0; i < resultSize; i++) {
          Value *eltData =
              GenerateCBLoad(handle, idxList[i], EltTy, hlslOP, Builder);
          ldData = Builder.CreateInsertElement(ldData, eltData, i);
        }
      } else {
        ldData = GenerateCBLoad(handle, idxList[0], EltTy, hlslOP, Builder);
      }

      for (auto U = CI->user_begin(); U != CI->user_end();) {
        Value *subsUser = *(U++);
        if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(subsUser)) {
          Value *subData = GenerateVecEltFromGEP(ldData, GEP, Builder,
                                                 /*bInsertLdNextToGEP*/ true);
          for (auto gepU = GEP->user_begin(); gepU != GEP->user_end();) {
            Value *gepUser = *(gepU++);
            // Must be a load here.
            LoadInst *ldUser = cast<LoadInst>(gepUser);
            ldUser->replaceAllUsesWith(subData);
            ldUser->eraseFromParent();
          }
          GEP->eraseFromParent();
        } else {
          // Must be a load here.
          LoadInst *ldUser = cast<LoadInst>(subsUser);
          ldUser->replaceAllUsesWith(ldData);
          ldUser->eraseFromParent();
        }
      }

      CI->eraseFromParent();
    } else {
      DXASSERT(0, "not implemented yet");
    }
  } else if (LoadInst *ldInst = dyn_cast<LoadInst>(user)) {
    Type *Ty = ldInst->getType();
    Type *EltTy = Ty->getScalarType();
    // Resource inside cbuffer is lowered after GenerateDxilOperations.
    if (dxilutil::IsHLSLObjectType(Ty)) {
      CallInst *CI = cast<CallInst>(handle);
      // CI should be an annotateHandle; we need the underlying createHandle
      // here.
      if (GetHLOpcodeGroup(CI->getCalledFunction()) ==
          HLOpcodeGroup::HLAnnotateHandle)
        CI = cast<CallInst>(CI->getArgOperand(HLOperandIndex::kHandleOpIdx));
      GlobalVariable *CbGV = cast<GlobalVariable>(
          CI->getArgOperand(HLOperandIndex::kCreateHandleResourceOpIdx));
      TranslateResourceInCB(ldInst, pObjHelper, CbGV);
      return;
    }
    DXASSERT(!Ty->isAggregateType(), "should be flat in previous pass");

    unsigned EltByteSize = GetEltTypeByteSizeForConstBuf(EltTy, DL);
    Value *newLd = GenerateCBLoad(handle, baseOffset, EltTy, hlslOP, Builder);
    if (Ty->isVectorTy()) {
      Value *result = UndefValue::get(Ty);
      result = Builder.CreateInsertElement(result, newLd, (uint64_t)0);
      // Update offset by the element size.
      Value *offset =
          Builder.CreateAdd(baseOffset, hlslOP->GetU32Const(EltByteSize));
      for (unsigned i = 1; i < Ty->getVectorNumElements(); i++) {
        Value *elt = GenerateCBLoad(handle, offset, EltTy, hlslOP, Builder);
        result = Builder.CreateInsertElement(result, elt, i);
        // Update offset by the element size.
        offset = Builder.CreateAdd(offset, hlslOP->GetU32Const(EltByteSize));
      }
      newLd = result;
    }

    ldInst->replaceAllUsesWith(newLd);
    ldInst->eraseFromParent();
  } else {
    // Must be a GEP here.
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(user);
    TranslateCBGep(GEP, handle, baseOffset, hlslOP, Builder,
                   prevFieldAnnotation, DL, dxilTypeSys, pObjHelper);
    GEP->eraseFromParent();
  }
}
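// Folds a GEP chain over a cbuffer into a byte offset: struct fields come
// from the type system's cbuffer annotations, while array and vector steps
// are scaled by their element sizes (arrays aligned up to 16 bytes).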
void TranslateCBGep(GetElementPtrInst *GEP, Value *handle, Value *baseOffset,
                    hlsl::OP *hlslOP, IRBuilder<> &Builder,
                    DxilFieldAnnotation *prevFieldAnnotation,
                    const DataLayout &DL, DxilTypeSystem &dxilTypeSys,
                    HLObjectOperationLowerHelper *pObjHelper) {
  SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());

  Value *offset = baseOffset;
  // Update the offset for each level of the GEP.
  DxilFieldAnnotation *fieldAnnotation = prevFieldAnnotation;

  gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP);

  for (; GEPIt != E; GEPIt++) {
    Value *idx = GEPIt.getOperand();
    unsigned immIdx = 0;
    bool bImmIdx = false;
    if (Constant *constIdx = dyn_cast<Constant>(idx)) {
      immIdx = constIdx->getUniqueInteger().getLimitedValue();
      bImmIdx = true;
    }

    if (GEPIt->isPointerTy()) {
      Type *EltTy = GEPIt->getPointerElementType();
      unsigned size = 0;
      if (StructType *ST = dyn_cast<StructType>(EltTy)) {
        DxilStructAnnotation *annotation = dxilTypeSys.GetStructAnnotation(ST);
        size = annotation->GetCBufferSize();
      } else {
        DXASSERT(fieldAnnotation, "must be a field");
        if (ArrayType *AT = dyn_cast<ArrayType>(EltTy)) {
          unsigned EltSize = dxilutil::GetLegacyCBufferFieldElementSize(
              *fieldAnnotation, EltTy, dxilTypeSys);

          // Decide the nested array size.
          unsigned nestedArraySize = 1;

          Type *EltTy = AT->getArrayElementType();
          // Support multiple levels of arrays.
          while (EltTy->isArrayTy()) {
            ArrayType *EltAT = cast<ArrayType>(EltTy);
            nestedArraySize *= EltAT->getNumElements();
            EltTy = EltAT->getElementType();
          }
          // Align to 4 * 4 bytes.
          unsigned alignedSize = (EltSize + 15) & 0xfffffff0;
          size = nestedArraySize * alignedSize;
        } else {
          size = DL.getTypeAllocSize(EltTy);
        }
      }
      // Align to 4 * 4 bytes.
      size = (size + 15) & 0xfffffff0;
      if (bImmIdx) {
        unsigned tempOffset = size * immIdx;
        offset = Builder.CreateAdd(offset, hlslOP->GetU32Const(tempOffset));
      } else {
        Value *tempOffset = Builder.CreateMul(idx, hlslOP->GetU32Const(size));
        offset = Builder.CreateAdd(offset, tempOffset);
      }
    } else if (GEPIt->isStructTy()) {
      StructType *ST = cast<StructType>(*GEPIt);
      DxilStructAnnotation *annotation = dxilTypeSys.GetStructAnnotation(ST);
      fieldAnnotation = &annotation->GetFieldAnnotation(immIdx);
      unsigned structOffset = fieldAnnotation->GetCBufferOffset();
      offset = Builder.CreateAdd(offset, hlslOP->GetU32Const(structOffset));
    } else if (GEPIt->isArrayTy()) {
      DXASSERT(fieldAnnotation != nullptr, "must be a field");
      unsigned EltSize = dxilutil::GetLegacyCBufferFieldElementSize(
          *fieldAnnotation, *GEPIt, dxilTypeSys);
      // Decide the nested array size.
      unsigned nestedArraySize = 1;

      Type *EltTy = GEPIt->getArrayElementType();
      // Support multiple levels of arrays.
      while (EltTy->isArrayTy()) {
        ArrayType *EltAT = cast<ArrayType>(EltTy);
        nestedArraySize *= EltAT->getNumElements();
        EltTy = EltAT->getElementType();
      }
      // Align to 4 * 4 bytes.
      unsigned alignedSize = (EltSize + 15) & 0xfffffff0;
      unsigned size = nestedArraySize * alignedSize;

      if (bImmIdx) {
        unsigned tempOffset = size * immIdx;
        offset = Builder.CreateAdd(offset, hlslOP->GetU32Const(tempOffset));
      } else {
        Value *tempOffset = Builder.CreateMul(idx, hlslOP->GetU32Const(size));
        offset = Builder.CreateAdd(offset, tempOffset);
      }
    } else if (GEPIt->isVectorTy()) {
      unsigned size = DL.getTypeAllocSize(GEPIt->getVectorElementType());
      if (bImmIdx) {
        unsigned tempOffset = size * immIdx;
        offset = Builder.CreateAdd(offset, hlslOP->GetU32Const(tempOffset));
      } else {
        Value *tempOffset = Builder.CreateMul(idx, hlslOP->GetU32Const(size));
        offset = Builder.CreateAdd(offset, tempOffset);
      }
    } else {
      gep_type_iterator temp = GEPIt;
      temp++;
      DXASSERT(temp == E, "scalar type must be the last");
    }
  }

  for (auto U = GEP->user_begin(); U != GEP->user_end();) {
    Instruction *user = cast<Instruction>(*(U++));
    TranslateCBAddressUser(user, handle, offset, hlslOP, fieldAnnotation,
                           dxilTypeSys, DL, pObjHelper);
  }
}
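/*
  Legacy cbuffer loads address the buffer in 16-byte rows and extract a
  channel from the returned struct. A sketch for a float at row %rowIdx,
  channel 0 (value names illustrative; <op> stands for the CBufferLoadLegacy
  opcode):
      %row = call %dx.types.CBufRet.f32 @dx.op.cbufferLoadLegacy.f32(
                 i32 <op>, %dx.types.Handle %h, i32 %rowIdx)
      %x   = extractvalue %dx.types.CBufRet.f32 %row, 0
*/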
Value *GenerateCBLoadLegacy(Value *handle, Value *legacyIdx,
                            unsigned channelOffset, Type *EltTy, OP *hlslOP,
                            IRBuilder<> &Builder) {
  Constant *OpArg =
      hlslOP->GetU32Const((unsigned)OP::OpCode::CBufferLoadLegacy);

  DXASSERT(!EltTy->isIntegerTy(1),
           "Bools should not be loaded as their register representation.");

  Type *doubleTy = Type::getDoubleTy(EltTy->getContext());
  Type *halfTy = Type::getHalfTy(EltTy->getContext());
  Type *i64Ty = Type::getInt64Ty(EltTy->getContext());
  Type *i16Ty = Type::getInt16Ty(EltTy->getContext());

  bool is64 = (EltTy == doubleTy) | (EltTy == i64Ty);
  bool is16 =
      (EltTy == halfTy || EltTy == i16Ty) && !hlslOP->UseMinPrecision();
  DXASSERT_LOCALVAR(is16, (is16 && channelOffset < 8) || channelOffset < 4,
                    "legacy cbuffer load must not cross a 16-byte register.");

  if (is64) {
    Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoadLegacy, EltTy);
    Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx});
    DXASSERT((channelOffset & 1) == 0,
             "channel offset must be even for double");
    unsigned eltIdx = channelOffset >> 1;
    Value *Result = Builder.CreateExtractValue(loadLegacy, eltIdx);
    return Result;
  } else {
    Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoadLegacy, EltTy);
    Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx});
    return Builder.CreateExtractValue(loadLegacy, channelOffset);
  }
}
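// Vector overload: the same row-addressed load, fanning the returned
// channels out into a vector and stepping to the next register when a
// 64-bit vector spills past the first two channels.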
*handle, Value *legacyIdx, unsigned channelOffset, Type *EltTy, unsigned vecSize, OP *hlslOP, IRBuilder<> &Builder) { Constant *OpArg = hlslOP->GetU32Const((unsigned)OP::OpCode::CBufferLoadLegacy); DXASSERT(!EltTy->isIntegerTy(1), "Bools should not be loaded as their register representation."); Type *doubleTy = Type::getDoubleTy(EltTy->getContext()); Type *i64Ty = Type::getInt64Ty(EltTy->getContext()); Type *halfTy = Type::getHalfTy(EltTy->getContext()); Type *shortTy = Type::getInt16Ty(EltTy->getContext()); bool is64 = (EltTy == doubleTy) | (EltTy == i64Ty); bool is16 = (EltTy == shortTy || EltTy == halfTy) && !hlslOP->UseMinPrecision(); DXASSERT((is16 && channelOffset + vecSize <= 8) || (channelOffset + vecSize) <= 4, "legacy cbuffer don't across 16 bytes register."); if (is16) { Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoadLegacy, EltTy); Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx}); Value *Result = UndefValue::get(VectorType::get(EltTy, vecSize)); for (unsigned i = 0; i < vecSize; ++i) { Value *NewElt = Builder.CreateExtractValue(loadLegacy, channelOffset + i); Result = Builder.CreateInsertElement(Result, NewElt, i); } return Result; } else if (is64) { Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoadLegacy, EltTy); Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx}); Value *Result = UndefValue::get(VectorType::get(EltTy, vecSize)); unsigned smallVecSize = 2; if (vecSize < smallVecSize) smallVecSize = vecSize; for (unsigned i = 0; i < smallVecSize; ++i) { Value *NewElt = Builder.CreateExtractValue(loadLegacy, channelOffset + i); Result = Builder.CreateInsertElement(Result, NewElt, i); } if (vecSize > 2) { // Got to next cb register. legacyIdx = Builder.CreateAdd(legacyIdx, hlslOP->GetU32Const(1)); Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx}); for (unsigned i = 2; i < vecSize; ++i) { Value *NewElt = Builder.CreateExtractValue(loadLegacy, i - 2); Result = Builder.CreateInsertElement(Result, NewElt, i); } } return Result; } else { Function *CBLoad = hlslOP->GetOpFunc(OP::OpCode::CBufferLoadLegacy, EltTy); Value *loadLegacy = Builder.CreateCall(CBLoad, {OpArg, handle, legacyIdx}); Value *Result = UndefValue::get(VectorType::get(EltTy, vecSize)); for (unsigned i = 0; i < vecSize; ++i) { Value *NewElt = Builder.CreateExtractValue(loadLegacy, channelOffset + i); Result = Builder.CreateInsertElement(Result, NewElt, i); } return Result; } } Value *TranslateConstBufMatLdLegacy(HLMatrixType MatTy, Value *handle, Value *legacyIdx, bool colMajor, OP *OP, bool memElemRepr, const DataLayout &DL, IRBuilder<> &Builder) { Type *EltTy = MatTy.getElementTypeForMem(); unsigned matSize = MatTy.getNumElements(); std::vector<Value *> elts(matSize); unsigned EltByteSize = GetEltTypeByteSizeForConstBuf(EltTy, DL); if (colMajor) { unsigned colByteSize = 4 * EltByteSize; unsigned colRegSize = (colByteSize + 15) >> 4; for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { Value *col = GenerateCBLoadLegacy(handle, legacyIdx, /*channelOffset*/ 0, EltTy, MatTy.getNumRows(), OP, Builder); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { unsigned matIdx = MatTy.getColumnMajorIndex(r, c); elts[matIdx] = Builder.CreateExtractElement(col, r); } // Update offset for a column. 
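// Each column is padded out to four components, so colRegSize is one
// legacy register for 32-bit element types (16 bytes) and two registers
// for 64-bit element types (32 bytes).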
legacyIdx = Builder.CreateAdd(legacyIdx, OP->GetU32Const(colRegSize)); } } else { unsigned rowByteSize = 4 * EltByteSize; unsigned rowRegSize = (rowByteSize + 15) >> 4; for (unsigned r = 0; r < MatTy.getNumRows(); r++) { Value *row = GenerateCBLoadLegacy(handle, legacyIdx, /*channelOffset*/ 0, EltTy, MatTy.getNumColumns(), OP, Builder); for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { unsigned matIdx = MatTy.getRowMajorIndex(r, c); elts[matIdx] = Builder.CreateExtractElement(row, c); } // Update offset for a row. legacyIdx = Builder.CreateAdd(legacyIdx, OP->GetU32Const(rowRegSize)); } } Value *Vec = HLMatrixLower::BuildVector(EltTy, elts, Builder); if (!memElemRepr) Vec = MatTy.emitLoweredMemToReg(Vec, Builder); return Vec; } void TranslateCBGepLegacy(GetElementPtrInst *GEP, Value *handle, Value *legacyIdx, unsigned channelOffset, hlsl::OP *hlslOP, IRBuilder<> &Builder, DxilFieldAnnotation *prevFieldAnnotation, const DataLayout &DL, DxilTypeSystem &dxilTypeSys, HLObjectOperationLowerHelper *pObjHelper); void TranslateCBAddressUserLegacy(Instruction *user, Value *handle, Value *legacyIdx, unsigned channelOffset, hlsl::OP *hlslOP, DxilFieldAnnotation *prevFieldAnnotation, DxilTypeSystem &dxilTypeSys, const DataLayout &DL, HLObjectOperationLowerHelper *pObjHelper) { IRBuilder<> Builder(user); if (CallInst *CI = dyn_cast<CallInst>(user)) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::HLMatLoadStore) { unsigned opcode = GetHLOpcode(CI); HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); bool colMajor = matOp == HLMatLoadStoreOpcode::ColMatLoad; DXASSERT(matOp == HLMatLoadStoreOpcode::ColMatLoad || matOp == HLMatLoadStoreOpcode::RowMatLoad, "No store on cbuffer"); HLMatrixType MatTy = HLMatrixType::cast(CI->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx) ->getType() ->getPointerElementType()); // This will replace a call, so we should use the register representation // of elements Value *newLd = TranslateConstBufMatLdLegacy( MatTy, handle, legacyIdx, colMajor, hlslOP, /*memElemRepr*/ false, DL, Builder); CI->replaceAllUsesWith(newLd); dxilutil::TryScatterDebugValueToVectorElements(newLd); CI->eraseFromParent(); } else if (group == HLOpcodeGroup::HLSubscript) { unsigned opcode = GetHLOpcode(CI); HLSubscriptOpcode subOp = static_cast<HLSubscriptOpcode>(opcode); Value *basePtr = CI->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx); HLMatrixType MatTy = HLMatrixType::cast(basePtr->getType()->getPointerElementType()); Type *EltTy = MatTy.getElementTypeForReg(); Value *idx = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx); Type *resultType = CI->getType()->getPointerElementType(); unsigned resultSize = 1; if (resultType->isVectorTy()) resultSize = resultType->getVectorNumElements(); DXASSERT(resultSize <= 16, "up to 4x4 elements in vector or matrix"); assert(resultSize <= 16); Value *idxList[16]; bool colMajor = subOp == HLSubscriptOpcode::ColMatSubscript || subOp == HLSubscriptOpcode::ColMatElement; bool dynamicIndexing = !isa<ConstantInt>(idx) && !isa<ConstantAggregateZero>(idx) && !isa<ConstantDataSequential>(idx); Value *ldData = UndefValue::get(resultType); if (!dynamicIndexing) { // This will replace a load or GEP, so we should use the memory // representation of elements Value *matLd = TranslateConstBufMatLdLegacy( MatTy, handle, legacyIdx, colMajor, hlslOP, /*memElemRepr*/ true, DL, Builder); // The matLd is keep original layout, just use the idx calc in // EmitHLSLMatrixElement and 
EmitHLSLMatrixSubscript. switch (subOp) { case HLSubscriptOpcode::RowMatSubscript: case HLSubscriptOpcode::ColMatSubscript: { for (unsigned i = 0; i < resultSize; i++) { idxList[i] = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx + i); } } break; case HLSubscriptOpcode::RowMatElement: case HLSubscriptOpcode::ColMatElement: { Constant *EltIdxs = cast<Constant>(idx); for (unsigned i = 0; i < resultSize; i++) { idxList[i] = EltIdxs->getAggregateElement(i); } } break; default: DXASSERT(0, "invalid operation on const buffer"); break; } if (resultType->isVectorTy()) { for (unsigned i = 0; i < resultSize; i++) { Value *eltData = Builder.CreateExtractElement(matLd, idxList[i]); ldData = Builder.CreateInsertElement(ldData, eltData, i); } } else { Value *eltData = Builder.CreateExtractElement(matLd, idxList[0]); ldData = eltData; } } else { // Must be matSub here. Value *idx = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx); if (colMajor) { // idx is c * row + r. // For first col, c is 0, so idx is r. Value *one = Builder.getInt32(1); // row.x = c[0].[idx] // row.y = c[1].[idx] // row.z = c[2].[idx] // row.w = c[3].[idx] Value *Elts[4]; ArrayType *AT = ArrayType::get(EltTy, MatTy.getNumColumns()); IRBuilder<> AllocaBuilder(user->getParent() ->getParent() ->getEntryBlock() .getFirstInsertionPt()); Value *tempArray = AllocaBuilder.CreateAlloca(AT); Value *zero = AllocaBuilder.getInt32(0); Value *cbufIdx = legacyIdx; for (unsigned int c = 0; c < MatTy.getNumColumns(); c++) { Value *ColVal = GenerateCBLoadLegacy( handle, cbufIdx, /*channelOffset*/ 0, EltTy, MatTy.getNumRows(), hlslOP, Builder); // Convert ColVal to array for indexing. for (unsigned int r = 0; r < MatTy.getNumRows(); r++) { Value *Elt = Builder.CreateExtractElement(ColVal, Builder.getInt32(r)); Value *Ptr = Builder.CreateInBoundsGEP( tempArray, {zero, Builder.getInt32(r)}); Builder.CreateStore(Elt, Ptr); } Value *Ptr = Builder.CreateInBoundsGEP(tempArray, {zero, idx}); Elts[c] = Builder.CreateLoad(Ptr); // Update cbufIdx. cbufIdx = Builder.CreateAdd(cbufIdx, one); } if (resultType->isVectorTy()) { for (unsigned int c = 0; c < MatTy.getNumColumns(); c++) { ldData = Builder.CreateInsertElement(ldData, Elts[c], c); } } else { ldData = Elts[0]; } } else { // idx is r * col + c; // r = idx / col; Value *cCol = ConstantInt::get(idx->getType(), MatTy.getNumColumns()); idx = Builder.CreateUDiv(idx, cCol); idx = Builder.CreateAdd(idx, legacyIdx); // Just return a row; 'col' is the number of columns in the row. ldData = GenerateCBLoadLegacy(handle, idx, /*channelOffset*/ 0, EltTy, MatTy.getNumColumns(), hlslOP, Builder); } if (!resultType->isVectorTy()) { ldData = Builder.CreateExtractElement(ldData, Builder.getInt32(0)); } } for (auto U = CI->user_begin(); U != CI->user_end();) { Value *subsUser = *(U++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(subsUser)) { Value *subData = GenerateVecEltFromGEP(ldData, GEP, Builder, /*bInsertLdNextToGEP*/ true); for (auto gepU = GEP->user_begin(); gepU != GEP->user_end();) { Value *gepUser = *(gepU++); // Must be load here; LoadInst *ldUser = cast<LoadInst>(gepUser); ldUser->replaceAllUsesWith(subData); ldUser->eraseFromParent(); } GEP->eraseFromParent(); } else { // Must be load here. 
LoadInst *ldUser = cast<LoadInst>(subsUser); ldUser->replaceAllUsesWith(ldData); ldUser->eraseFromParent(); } } CI->eraseFromParent(); } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(user)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { DXASSERT(II->use_empty(), "lifetime intrinsic can't have uses"); II->eraseFromParent(); } else { DXASSERT(0, "not implemented yet"); } } else { DXASSERT(0, "not implemented yet"); } } else if (LoadInst *ldInst = dyn_cast<LoadInst>(user)) { Type *Ty = ldInst->getType(); Type *EltTy = Ty->getScalarType(); // Resource inside cbuffer is lowered after GenerateDxilOperations. if (dxilutil::IsHLSLObjectType(Ty)) { CallInst *CI = cast<CallInst>(handle); // CI should be annotate handle. // Need createHandle here. if (GetHLOpcodeGroup(CI->getCalledFunction()) == HLOpcodeGroup::HLAnnotateHandle) CI = cast<CallInst>(CI->getArgOperand(HLOperandIndex::kHandleOpIdx)); GlobalVariable *CbGV = cast<GlobalVariable>( CI->getArgOperand(HLOperandIndex::kCreateHandleResourceOpIdx)); TranslateResourceInCB(ldInst, pObjHelper, CbGV); return; } DXASSERT(!Ty->isAggregateType(), "should be flat in previous pass"); Value *newLd = nullptr; if (Ty->isVectorTy()) newLd = GenerateCBLoadLegacy(handle, legacyIdx, channelOffset, EltTy, Ty->getVectorNumElements(), hlslOP, Builder); else newLd = GenerateCBLoadLegacy(handle, legacyIdx, channelOffset, EltTy, hlslOP, Builder); ldInst->replaceAllUsesWith(newLd); dxilutil::TryScatterDebugValueToVectorElements(newLd); ldInst->eraseFromParent(); } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(user)) { for (auto it = BCI->user_begin(); it != BCI->user_end();) { Instruction *I = cast<Instruction>(*it++); TranslateCBAddressUserLegacy(I, handle, legacyIdx, channelOffset, hlslOP, prevFieldAnnotation, dxilTypeSys, DL, pObjHelper); } BCI->eraseFromParent(); } else { // Must be GEP here GetElementPtrInst *GEP = cast<GetElementPtrInst>(user); TranslateCBGepLegacy(GEP, handle, legacyIdx, channelOffset, hlslOP, Builder, prevFieldAnnotation, DL, dxilTypeSys, pObjHelper); GEP->eraseFromParent(); } } void TranslateCBGepLegacy(GetElementPtrInst *GEP, Value *handle, Value *legacyIndex, unsigned channel, hlsl::OP *hlslOP, IRBuilder<> &Builder, DxilFieldAnnotation *prevFieldAnnotation, const DataLayout &DL, DxilTypeSystem &dxilTypeSys, HLObjectOperationLowerHelper *pObjHelper) { SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end()); // update offset DxilFieldAnnotation *fieldAnnotation = prevFieldAnnotation; gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); for (; GEPIt != E; GEPIt++) { Value *idx = GEPIt.getOperand(); unsigned immIdx = 0; bool bImmIdx = false; if (Constant *constIdx = dyn_cast<Constant>(idx)) { immIdx = constIdx->getUniqueInteger().getLimitedValue(); bImmIdx = true; } if (GEPIt->isPointerTy()) { Type *EltTy = GEPIt->getPointerElementType(); unsigned size = 0; if (StructType *ST = dyn_cast<StructType>(EltTy)) { DxilStructAnnotation *annotation = dxilTypeSys.GetStructAnnotation(ST); size = annotation->GetCBufferSize(); } else { DXASSERT(fieldAnnotation, "must be a field"); if (ArrayType *AT = dyn_cast<ArrayType>(EltTy)) { unsigned EltSize = dxilutil::GetLegacyCBufferFieldElementSize( *fieldAnnotation, EltTy, dxilTypeSys); // Decide the nested array size. 
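// (inner array dimensions are flattened into a single element count, and
// each leaf element is rounded up to a full 16-byte register before being
// scaled by the index)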
unsigned nestedArraySize = 1; Type *EltTy = AT->getArrayElementType(); // support multi level of array while (EltTy->isArrayTy()) { ArrayType *EltAT = cast<ArrayType>(EltTy); nestedArraySize *= EltAT->getNumElements(); EltTy = EltAT->getElementType(); } // Align to 4 * 4 bytes. unsigned alignedSize = (EltSize + 15) & 0xfffffff0; size = nestedArraySize * alignedSize; } else { size = DL.getTypeAllocSize(EltTy); } } // Skip 0 idx. if (bImmIdx && immIdx == 0) continue; // Align to 4 * 4 bytes. size = (size + 15) & 0xfffffff0; // Take this as array idxing. if (bImmIdx) { unsigned tempOffset = size * immIdx; unsigned idxInc = tempOffset >> 4; legacyIndex = Builder.CreateAdd(legacyIndex, hlslOP->GetU32Const(idxInc)); } else { Value *idxInc = Builder.CreateMul(idx, hlslOP->GetU32Const(size >> 4)); legacyIndex = Builder.CreateAdd(legacyIndex, idxInc); } // Array always start from x channel. channel = 0; } else if (GEPIt->isStructTy()) { StructType *ST = cast<StructType>(*GEPIt); DxilStructAnnotation *annotation = dxilTypeSys.GetStructAnnotation(ST); fieldAnnotation = &annotation->GetFieldAnnotation(immIdx); unsigned idxInc = 0; unsigned structOffset = 0; if (fieldAnnotation->GetCompType().Is16Bit() && !hlslOP->UseMinPrecision()) { structOffset = fieldAnnotation->GetCBufferOffset() >> 1; channel += structOffset; idxInc = channel >> 3; channel = channel & 0x7; } else { structOffset = fieldAnnotation->GetCBufferOffset() >> 2; channel += structOffset; idxInc = channel >> 2; channel = channel & 0x3; } if (idxInc) legacyIndex = Builder.CreateAdd(legacyIndex, hlslOP->GetU32Const(idxInc)); } else if (GEPIt->isArrayTy()) { DXASSERT(fieldAnnotation != nullptr, "must a field"); unsigned EltSize = dxilutil::GetLegacyCBufferFieldElementSize( *fieldAnnotation, *GEPIt, dxilTypeSys); // Decide the nested array size. unsigned nestedArraySize = 1; Type *EltTy = GEPIt->getArrayElementType(); // support multi level of array while (EltTy->isArrayTy()) { ArrayType *EltAT = cast<ArrayType>(EltTy); nestedArraySize *= EltAT->getNumElements(); EltTy = EltAT->getElementType(); } // Align to 4 * 4 bytes. unsigned alignedSize = (EltSize + 15) & 0xfffffff0; unsigned size = nestedArraySize * alignedSize; if (bImmIdx) { unsigned tempOffset = size * immIdx; unsigned idxInc = tempOffset >> 4; legacyIndex = Builder.CreateAdd(legacyIndex, hlslOP->GetU32Const(idxInc)); } else { Value *idxInc = Builder.CreateMul(idx, hlslOP->GetU32Const(size >> 4)); legacyIndex = Builder.CreateAdd(legacyIndex, idxInc); } // Array always start from x channel. channel = 0; } else if (GEPIt->isVectorTy()) { // Indexing on vector. if (bImmIdx) { if (immIdx < GEPIt->getVectorNumElements()) { const unsigned vectorElmSize = DL.getTypeAllocSize(GEPIt->getVectorElementType()); const bool bIs16bitType = vectorElmSize == 2; const unsigned tempOffset = vectorElmSize * immIdx; const unsigned numChannelsPerRow = bIs16bitType ? 8 : 4; const unsigned channelInc = bIs16bitType ? tempOffset >> 1 : tempOffset >> 2; DXASSERT((channel + channelInc) < numChannelsPerRow, "vector should not cross cb register"); channel += channelInc; if (channel == numChannelsPerRow) { // Get to another row. // Update index and channel. 
channel = 0; legacyIndex = Builder.CreateAdd(legacyIndex, Builder.getInt32(1)); } } else { StringRef resName = "(unknown)"; if (DxilResourceBase *Res = pObjHelper->FindCBufferResourceFromHandle(handle)) { resName = Res->GetGlobalName(); } legacyIndex = hlsl::CreatePoisonValue( legacyIndex->getType(), Twine("Out of bounds index (") + Twine(immIdx) + Twine(") in CBuffer '") + Twine(resName) + ("'"), GEP->getDebugLoc(), GEP); channel = 0; } } else { Type *EltTy = GEPIt->getVectorElementType(); unsigned vecSize = GEPIt->getVectorNumElements(); // Load the whole register. Value *newLd = GenerateCBLoadLegacy(handle, legacyIndex, /*channelOffset*/ channel, EltTy, /*vecSize*/ vecSize, hlslOP, Builder); // Copy to array. IRBuilder<> AllocaBuilder(GEP->getParent() ->getParent() ->getEntryBlock() .getFirstInsertionPt()); Value *tempArray = AllocaBuilder.CreateAlloca(ArrayType::get(EltTy, vecSize)); Value *zeroIdx = hlslOP->GetU32Const(0); for (unsigned i = 0; i < vecSize; i++) { Value *Elt = Builder.CreateExtractElement(newLd, i); Value *EltGEP = Builder.CreateInBoundsGEP( tempArray, {zeroIdx, hlslOP->GetU32Const(i)}); Builder.CreateStore(Elt, EltGEP); } // Make sure this is the end of GEP. gep_type_iterator temp = GEPIt; temp++; DXASSERT(temp == E, "scalar type must be the last"); // Replace the GEP with array GEP. Value *ArrayGEP = Builder.CreateInBoundsGEP(tempArray, {zeroIdx, idx}); GEP->replaceAllUsesWith(ArrayGEP); return; } } else { gep_type_iterator temp = GEPIt; temp++; DXASSERT(temp == E, "scalar type must be the last"); } } for (auto U = GEP->user_begin(); U != GEP->user_end();) { Instruction *user = cast<Instruction>(*(U++)); TranslateCBAddressUserLegacy(user, handle, legacyIndex, channel, hlslOP, fieldAnnotation, dxilTypeSys, DL, pObjHelper); } } void TranslateCBOperationsLegacy(Value *handle, Value *ptr, OP *hlslOP, DxilTypeSystem &dxilTypeSys, const DataLayout &DL, HLObjectOperationLowerHelper *pObjHelper) { auto User = ptr->user_begin(); auto UserE = ptr->user_end(); Value *zeroIdx = hlslOP->GetU32Const(0); for (; User != UserE;) { // Must be Instruction. Instruction *I = cast<Instruction>(*(User++)); TranslateCBAddressUserLegacy( I, handle, zeroIdx, /*channelOffset*/ 0, hlslOP, /*prevFieldAnnotation*/ nullptr, dxilTypeSys, DL, pObjHelper); } } } // namespace // Structured buffer. namespace { // Load a value from a typed buffer with an offset. // Typed buffers do not directly support reading at offsets // because the whole value (e.g. float4) must be read at once. // If we are provided a non-zero offset, we need to simulate it // by returning the correct elements.
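// A typed buffer load always returns a full element (up to four
// components), so the helpers below emit one BufferLoad and then select
// the requested components, spilling to a temporary array alloca when the
// byte offset is not a compile-time constant.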
using ResRetValueArray = std::array<Value *, 4>; static ResRetValueArray GenerateTypedBufferLoad(Value *Handle, Type *BufferElemTy, Value *ElemIdx, Value *StatusPtr, OP *HlslOP, IRBuilder<> &Builder) { OP::OpCode OpCode = OP::OpCode::BufferLoad; Value *LoadArgs[] = {HlslOP->GetU32Const((unsigned)OpCode), Handle, ElemIdx, UndefValue::get(Builder.getInt32Ty())}; Function *LoadFunc = HlslOP->GetOpFunc(OpCode, BufferElemTy); Value *Load = Builder.CreateCall(LoadFunc, LoadArgs, OP::GetOpCodeName(OpCode)); ResRetValueArray ResultValues; for (unsigned i = 0; i < ResultValues.size(); ++i) { ResultValues[i] = cast<ExtractValueInst>(Builder.CreateExtractValue(Load, {i})); } UpdateStatus(Load, StatusPtr, Builder, HlslOP); return ResultValues; } static AllocaInst *SpillValuesToArrayAlloca(ArrayRef<Value *> Values, IRBuilder<> &Builder) { DXASSERT_NOMSG(!Values.empty()); IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); AllocaInst *ArrayAlloca = AllocaBuilder.CreateAlloca( ArrayType::get(Values[0]->getType(), Values.size())); for (unsigned i = 0; i < Values.size(); ++i) { Value *ArrayElemPtr = Builder.CreateGEP( ArrayAlloca, {Builder.getInt32(0), Builder.getInt32(i)}); Builder.CreateStore(Values[i], ArrayElemPtr); } return ArrayAlloca; } static Value *ExtractFromTypedBufferLoad(const ResRetValueArray &ResRet, Type *ResultTy, Value *Offset, IRBuilder<> &Builder) { unsigned ElemCount = ResultTy->isVectorTy() ? ResultTy->getVectorNumElements() : 1; DXASSERT_NOMSG(ElemCount < ResRet.size()); unsigned ElemSizeInBytes = ResRet[0]->getType()->getScalarSizeInBits() / 8; SmallVector<Value *, 4> Elems; if (ConstantInt *OffsetAsConstantInt = dyn_cast<ConstantInt>(Offset)) { // Get all elements to be returned uint64_t FirstElemOffset = OffsetAsConstantInt->getLimitedValue(); DXASSERT_NOMSG(FirstElemOffset % ElemSizeInBytes == 0); uint64_t FirstElemIdx = FirstElemOffset / ElemSizeInBytes; DXASSERT_NOMSG(FirstElemIdx <= ResRet.size() - ElemCount); for (unsigned ElemIdx = 0; ElemIdx < ElemCount; ++ElemIdx) { Elems.emplace_back( ResRet[std::min<size_t>(FirstElemIdx + ElemIdx, ResRet.size() - 1)]); } } else { Value *ArrayAlloca = SpillValuesToArrayAlloca( ArrayRef<Value *>(ResRet.data(), ResRet.size()), Builder); // Get all elements to be returned through dynamic indices Value *FirstElemIdx = Builder.CreateUDiv(Offset, Builder.getInt32(ElemSizeInBytes)); for (unsigned i = 0; i < ElemCount; ++i) { Value *ElemIdx = Builder.CreateAdd(FirstElemIdx, Builder.getInt32(i)); Value *ElemPtr = Builder.CreateGEP(ArrayAlloca, {Builder.getInt32(0), ElemIdx}); Elems.emplace_back(Builder.CreateLoad(ElemPtr)); } } return ScalarizeElements(ResultTy, Elems, Builder); } Value *GenerateRawBufLd(Value *handle, Value *bufIdx, Value *offset, Value *status, Type *EltTy, MutableArrayRef<Value *> resultElts, hlsl::OP *OP, IRBuilder<> &Builder, unsigned NumComponents, Constant *alignment) { OP::OpCode opcode = OP::OpCode::RawBufferLoad; DXASSERT(resultElts.size() <= 4, "buffer load cannot load more than 4 values"); if (bufIdx == nullptr) { // This is actually a byte address buffer load with a struct template type. // The call takes only one coordinates for the offset. 
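// Structured buffers address memory as (element index, byte offset within
// the element); byte address buffers take a single byte address, so the
// offset is moved into the index slot and the second coordinate is left
// undef below.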
bufIdx = offset; offset = UndefValue::get(offset->getType()); } Function *dxilF = OP->GetOpFunc(opcode, EltTy); Constant *mask = GetRawBufferMaskForETy(EltTy, NumComponents, OP); Value *Args[] = {OP->GetU32Const((unsigned)opcode), handle, bufIdx, offset, mask, alignment}; Value *Ld = Builder.CreateCall(dxilF, Args, OP::GetOpCodeName(opcode)); for (unsigned i = 0; i < resultElts.size(); i++) { resultElts[i] = Builder.CreateExtractValue(Ld, i); } // status UpdateStatus(Ld, status, Builder, OP); return Ld; } void GenerateStructBufSt(Value *handle, Value *bufIdx, Value *offset, Type *EltTy, hlsl::OP *OP, IRBuilder<> &Builder, ArrayRef<Value *> vals, uint8_t mask, Constant *alignment) { OP::OpCode opcode = OP::OpCode::RawBufferStore; DXASSERT(vals.size() == 4, "buffer store needs 4 values"); Value *Args[] = {OP->GetU32Const((unsigned)opcode), handle, bufIdx, offset, vals[0], vals[1], vals[2], vals[3], OP->GetU8Const(mask), alignment}; Function *dxilF = OP->GetOpFunc(opcode, EltTy); Builder.CreateCall(dxilF, Args); } static Value *TranslateRawBufVecLd(Type *VecEltTy, unsigned ElemCount, IRBuilder<> &Builder, Value *handle, hlsl::OP *OP, Value *status, Value *bufIdx, Value *baseOffset, const DataLayout &DL, std::vector<Value *> &bufLds, unsigned baseAlign, bool isScalarTy = false) { unsigned EltSize = DL.getTypeAllocSize(VecEltTy); unsigned alignment = std::min(baseAlign, EltSize); Constant *alignmentVal = OP->GetI32Const(alignment); if (baseOffset == nullptr) { baseOffset = OP->GetU32Const(0); } std::vector<Value *> elts(ElemCount); unsigned rest = (ElemCount % 4); for (unsigned i = 0; i < ElemCount - rest; i += 4) { Value *ResultElts[4]; Value *bufLd = GenerateRawBufLd(handle, bufIdx, baseOffset, status, VecEltTy, ResultElts, OP, Builder, 4, alignmentVal); bufLds.emplace_back(bufLd); elts[i] = ResultElts[0]; elts[i + 1] = ResultElts[1]; elts[i + 2] = ResultElts[2]; elts[i + 3] = ResultElts[3]; baseOffset = Builder.CreateAdd(baseOffset, OP->GetU32Const(4 * EltSize)); } if (rest) { Value *ResultElts[4]; Value *bufLd = GenerateRawBufLd(handle, bufIdx, baseOffset, status, VecEltTy, ResultElts, OP, Builder, rest, alignmentVal); bufLds.emplace_back(bufLd); for (unsigned i = 0; i < rest; i++) elts[ElemCount - rest + i] = ResultElts[i]; } // If the expected return type is scalar then skip building a vector. if (isScalarTy) { return elts[0]; } Value *Vec = HLMatrixLower::BuildVector(VecEltTy, elts, Builder); return Vec; } Value *TranslateStructBufMatLd(Type *matType, IRBuilder<> &Builder, Value *handle, hlsl::OP *OP, Value *status, Value *bufIdx, Value *baseOffset, const DataLayout &DL) { HLMatrixType MatTy = HLMatrixType::cast(matType); Type *EltTy = MatTy.getElementTypeForMem(); unsigned matSize = MatTy.getNumElements(); std::vector<Value *> bufLds; Value *Vec = TranslateRawBufVecLd(EltTy, matSize, Builder, handle, OP, status, bufIdx, baseOffset, DL, bufLds, /*baseAlign (in bytes)*/ 8); Vec = MatTy.emitLoweredMemToReg(Vec, Builder); return Vec; } void TranslateStructBufMatSt(Type *matType, IRBuilder<> &Builder, Value *handle, hlsl::OP *OP, Value *bufIdx, Value *baseOffset, Value *val, const DataLayout &DL) { HLMatrixType MatTy = HLMatrixType::cast(matType); Type *EltTy = MatTy.getElementTypeForMem(); val = MatTy.emitLoweredRegToMem(val, Builder); unsigned EltSize = DL.getTypeAllocSize(EltTy); Constant *Alignment = OP->GetI32Const(EltSize); Value *offset = baseOffset; if (baseOffset == nullptr) offset = OP->GetU32Const(0); unsigned matSize = MatTy.getNumElements(); Value *undefElt = UndefValue::get(EltTy);
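// Worked example for the store loop below: a float3x3 (nine 4-byte
// elements) becomes three RawBufferStore calls 16 bytes apart, with
// component masks XYZW, XYZW, and X.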
unsigned storeSize = matSize; if (matSize % 4) { storeSize = matSize + 4 - (matSize & 3); } std::vector<Value *> elts(storeSize, undefElt); for (unsigned i = 0; i < matSize; i++) elts[i] = Builder.CreateExtractElement(val, i); for (unsigned i = 0; i < matSize; i += 4) { uint8_t mask = 0; for (unsigned j = 0; j < 4 && (i + j) < matSize; j++) { if (elts[i + j] != undefElt) mask |= (1 << j); } GenerateStructBufSt(handle, bufIdx, offset, EltTy, OP, Builder, {elts[i], elts[i + 1], elts[i + 2], elts[i + 3]}, mask, Alignment); // Update offset by 4*4bytes. offset = Builder.CreateAdd(offset, OP->GetU32Const(4 * EltSize)); } } void TranslateStructBufMatLdSt(CallInst *CI, Value *handle, hlsl::OP *OP, Value *status, Value *bufIdx, Value *baseOffset, const DataLayout &DL) { IRBuilder<> Builder(CI); HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()); unsigned opcode = GetHLOpcode(CI); DXASSERT_LOCALVAR(group, group == HLOpcodeGroup::HLMatLoadStore, "only translate matrix loadStore here."); HLMatLoadStoreOpcode matOp = static_cast<HLMatLoadStoreOpcode>(opcode); // Due to the current way the initial codegen generates matrix // orientation casts, the in-register vector matrix has already been // reordered based on the destination's row or column-major packing // orientation. switch (matOp) { case HLMatLoadStoreOpcode::RowMatLoad: case HLMatLoadStoreOpcode::ColMatLoad: { Value *ptr = CI->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx); Value *NewLd = TranslateStructBufMatLd( ptr->getType()->getPointerElementType(), Builder, handle, OP, status, bufIdx, baseOffset, DL); CI->replaceAllUsesWith(NewLd); } break; case HLMatLoadStoreOpcode::RowMatStore: case HLMatLoadStoreOpcode::ColMatStore: { Value *ptr = CI->getArgOperand(HLOperandIndex::kMatStoreDstPtrOpIdx); Value *val = CI->getArgOperand(HLOperandIndex::kMatStoreValOpIdx); TranslateStructBufMatSt(ptr->getType()->getPointerElementType(), Builder, handle, OP, bufIdx, baseOffset, val, DL); } break; } CI->eraseFromParent(); } void TranslateStructBufSubscriptUser(Instruction *user, Value *handle, HLResource::Kind ResKind, Value *bufIdx, Value *baseOffset, Value *status, hlsl::OP *OP, const DataLayout &DL); // For case like mat[i][j]. // IdxList is [i][0], [i][1], [i][2],[i][3]. // Idx is j. // return [i][j] not mat[i][j] because resource ptr and temp ptr need different // code gen. static Value *LowerGEPOnMatIndexListToIndex(llvm::GetElementPtrInst *GEP, ArrayRef<Value *> IdxList) { IRBuilder<> Builder(GEP); Value *zero = Builder.getInt32(0); DXASSERT(GEP->getNumIndices() == 2, "must have 2 level"); Value *baseIdx = (GEP->idx_begin())->get(); DXASSERT_LOCALVAR(baseIdx, baseIdx == zero, "base index must be 0"); Value *Idx = (GEP->idx_begin() + 1)->get(); if (ConstantInt *immIdx = dyn_cast<ConstantInt>(Idx)) { return IdxList[immIdx->getSExtValue()]; } else { IRBuilder<> AllocaBuilder( GEP->getParent()->getParent()->getEntryBlock().getFirstInsertionPt()); unsigned size = IdxList.size(); // Store idxList to temp array. ArrayType *AT = ArrayType::get(IdxList[0]->getType(), size); Value *tempArray = AllocaBuilder.CreateAlloca(AT); for (unsigned i = 0; i < size; i++) { Value *EltPtr = Builder.CreateGEP(tempArray, {zero, Builder.getInt32(i)}); Builder.CreateStore(IdxList[i], EltPtr); } // Load the idx. Value *GEPOffset = Builder.CreateGEP(tempArray, {zero, Idx}); return Builder.CreateLoad(GEPOffset); } } // subscript operator for matrix of struct element. 
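// Each subscripted matrix component gets its own byte offset
// (baseOffset + componentIndex * elementByteSize), so the loads and
// stores below address components one at a time.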
void TranslateStructBufMatSubscript(CallInst *CI, Value *handle, HLResource::Kind ResKind, Value *bufIdx, Value *baseOffset, Value *status, hlsl::OP *hlslOP, const DataLayout &DL) { unsigned opcode = GetHLOpcode(CI); IRBuilder<> subBuilder(CI); HLSubscriptOpcode subOp = static_cast<HLSubscriptOpcode>(opcode); Value *basePtr = CI->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx); HLMatrixType MatTy = HLMatrixType::cast(basePtr->getType()->getPointerElementType()); Type *EltTy = MatTy.getElementTypeForReg(); Constant *alignment = hlslOP->GetI32Const(DL.getTypeAllocSize(EltTy)); Value *EltByteSize = ConstantInt::get( baseOffset->getType(), GetEltTypeByteSizeForConstBuf(EltTy, DL)); Value *idx = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx); Type *resultType = CI->getType()->getPointerElementType(); unsigned resultSize = 1; if (resultType->isVectorTy()) resultSize = resultType->getVectorNumElements(); DXASSERT(resultSize <= 16, "up to 4x4 elements in vector or matrix"); assert(resultSize <= 16); std::vector<Value *> idxList(resultSize); switch (subOp) { case HLSubscriptOpcode::ColMatSubscript: case HLSubscriptOpcode::RowMatSubscript: { for (unsigned i = 0; i < resultSize; i++) { Value *offset = CI->getArgOperand(HLOperandIndex::kMatSubscriptSubOpIdx + i); offset = subBuilder.CreateMul(offset, EltByteSize); idxList[i] = subBuilder.CreateAdd(baseOffset, offset); } } break; case HLSubscriptOpcode::RowMatElement: case HLSubscriptOpcode::ColMatElement: { Constant *EltIdxs = cast<Constant>(idx); for (unsigned i = 0; i < resultSize; i++) { Value *offset = subBuilder.CreateMul(EltIdxs->getAggregateElement(i), EltByteSize); idxList[i] = subBuilder.CreateAdd(baseOffset, offset); } } break; default: DXASSERT(0, "invalid operation on const buffer"); break; } Value *undefElt = UndefValue::get(EltTy); for (auto U = CI->user_begin(); U != CI->user_end();) { Value *subsUser = *(U++); if (resultSize == 1) { TranslateStructBufSubscriptUser(cast<Instruction>(subsUser), handle, ResKind, bufIdx, idxList[0], status, hlslOP, DL); continue; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(subsUser)) { Value *GEPOffset = LowerGEPOnMatIndexListToIndex(GEP, idxList); for (auto gepU = GEP->user_begin(); gepU != GEP->user_end();) { Instruction *gepUserInst = cast<Instruction>(*(gepU++)); TranslateStructBufSubscriptUser(gepUserInst, handle, ResKind, bufIdx, GEPOffset, status, hlslOP, DL); } GEP->eraseFromParent(); } else if (StoreInst *stUser = dyn_cast<StoreInst>(subsUser)) { IRBuilder<> stBuilder(stUser); Value *Val = stUser->getValueOperand(); if (Val->getType()->isVectorTy()) { for (unsigned i = 0; i < resultSize; i++) { Value *EltVal = stBuilder.CreateExtractElement(Val, i); uint8_t mask = DXIL::kCompMask_X; GenerateStructBufSt(handle, bufIdx, idxList[i], EltTy, hlslOP, stBuilder, {EltVal, undefElt, undefElt, undefElt}, mask, alignment); } } else { uint8_t mask = DXIL::kCompMask_X; GenerateStructBufSt(handle, bufIdx, idxList[0], EltTy, hlslOP, stBuilder, {Val, undefElt, undefElt, undefElt}, mask, alignment); } stUser->eraseFromParent(); } else { // Must be load here. 
LoadInst *ldUser = cast<LoadInst>(subsUser); IRBuilder<> ldBuilder(ldUser); Value *ldData = UndefValue::get(resultType); if (resultType->isVectorTy()) { for (unsigned i = 0; i < resultSize; i++) { Value *ResultElt; // TODO: This can be inefficient for row major matrix load GenerateRawBufLd(handle, bufIdx, idxList[i], /*status*/ nullptr, EltTy, ResultElt, hlslOP, ldBuilder, 1, alignment); ldData = ldBuilder.CreateInsertElement(ldData, ResultElt, i); } } else { GenerateRawBufLd(handle, bufIdx, idxList[0], /*status*/ nullptr, EltTy, ldData, hlslOP, ldBuilder, 4, alignment); } ldUser->replaceAllUsesWith(ldData); ldUser->eraseFromParent(); } } CI->eraseFromParent(); } void TranslateStructBufSubscriptUser(Instruction *user, Value *handle, HLResource::Kind ResKind, Value *bufIdx, Value *baseOffset, Value *status, hlsl::OP *OP, const DataLayout &DL) { IRBuilder<> Builder(user); if (CallInst *userCall = dyn_cast<CallInst>(user)) { HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(userCall->getCalledFunction()); unsigned opcode = GetHLOpcode(userCall); // For the case where the element type of the structured buffer is not a structure type. if (baseOffset == nullptr) baseOffset = OP->GetU32Const(0); if (group == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp IOP = static_cast<IntrinsicOp>(opcode); switch (IOP) { case IntrinsicOp::MOP_Load: { if (userCall->getType()->isPointerTy()) { // A struct element would be returned as a pointer, like the [] subscript. } else { // Use builtin types on structuredBuffer. } DXASSERT(0, "not implemented yet"); } break; case IntrinsicOp::IOP_InterlockedAdd: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Add, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedAnd: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::And, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedExchange: { Type *opType = nullptr; PointerType *ptrType = dyn_cast<PointerType>( userCall->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex) ->getType()); if (ptrType && ptrType->getElementType()->isFloatTy()) opType = Type::getInt32Ty(userCall->getContext()); AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset, opType); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Exchange, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedMax: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::IMax, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedMin: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::IMin, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedUMax: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::UMax, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedUMin: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::UMin, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedOr: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Or, Builder, OP); } break; case 
IntrinsicOp::IOP_InterlockedXor: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicBinOp, handle, bufIdx, baseOffset); TranslateAtomicBinaryOperation(helper, DXIL::AtomicBinOpCode::Xor, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedCompareStore: case IntrinsicOp::IOP_InterlockedCompareExchange: { AtomicHelper helper(userCall, DXIL::OpCode::AtomicCompareExchange, handle, bufIdx, baseOffset); TranslateAtomicCmpXChg(helper, Builder, OP); } break; case IntrinsicOp::IOP_InterlockedCompareStoreFloatBitwise: case IntrinsicOp::IOP_InterlockedCompareExchangeFloatBitwise: { Type *i32Ty = Type::getInt32Ty(userCall->getContext()); AtomicHelper helper(userCall, DXIL::OpCode::AtomicCompareExchange, handle, bufIdx, baseOffset, i32Ty); TranslateAtomicCmpXChg(helper, Builder, OP); } break; default: DXASSERT(0, "invalid opcode"); break; } userCall->eraseFromParent(); } else if (group == HLOpcodeGroup::HLMatLoadStore) TranslateStructBufMatLdSt(userCall, handle, OP, status, bufIdx, baseOffset, DL); else if (group == HLOpcodeGroup::HLSubscript) { TranslateStructBufMatSubscript(userCall, handle, ResKind, bufIdx, baseOffset, status, OP, DL); } } else if (isa<LoadInst>(user) || isa<StoreInst>(user)) { LoadInst *ldInst = dyn_cast<LoadInst>(user); StoreInst *stInst = dyn_cast<StoreInst>(user); Type *Ty = isa<LoadInst>(user) ? ldInst->getType() : stInst->getValueOperand()->getType(); Type *pOverloadTy = Ty->getScalarType(); Value *offset = baseOffset; unsigned arraySize = 1; Value *eltSize = nullptr; if (pOverloadTy->isArrayTy()) { arraySize = pOverloadTy->getArrayNumElements(); eltSize = OP->GetU32Const( DL.getTypeAllocSize(pOverloadTy->getArrayElementType())); pOverloadTy = pOverloadTy->getArrayElementType()->getScalarType(); } if (ldInst) { auto LdElement = [=](Value *offset, IRBuilder<> &Builder) -> Value * { unsigned numComponents = 0; if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { numComponents = VTy->getNumElements(); } else { numComponents = 1; } Constant *alignment = OP->GetI32Const(DL.getTypeAllocSize(Ty->getScalarType())); if (ResKind == HLResource::Kind::TypedBuffer) { // Typed buffer cannot have offsets, they must be loaded all at once ResRetValueArray ResRet = GenerateTypedBufferLoad( handle, pOverloadTy, bufIdx, status, OP, Builder); return ExtractFromTypedBufferLoad(ResRet, Ty, offset, Builder); } else { Value *ResultElts[4]; GenerateRawBufLd(handle, bufIdx, offset, status, pOverloadTy, ResultElts, OP, Builder, numComponents, alignment); return ScalarizeElements(Ty, ResultElts, Builder); } }; Value *newLd = LdElement(offset, Builder); if (arraySize > 1) { newLd = Builder.CreateInsertValue(UndefValue::get(Ty), newLd, (uint64_t)0); for (unsigned i = 1; i < arraySize; i++) { offset = Builder.CreateAdd(offset, eltSize); Value *eltLd = LdElement(offset, Builder); newLd = Builder.CreateInsertValue(newLd, eltLd, i); } } ldInst->replaceAllUsesWith(newLd); } else { Value *val = stInst->getValueOperand(); auto StElement = [&](Value *offset, Value *val, IRBuilder<> &Builder) { Value *undefVal = llvm::UndefValue::get(pOverloadTy); Value *vals[] = {undefVal, undefVal, undefVal, undefVal}; uint8_t mask = 0; if (Ty->isVectorTy()) { unsigned vectorNumElements = Ty->getVectorNumElements(); DXASSERT(vectorNumElements <= 4, "up to 4 elements in vector"); assert(vectorNumElements <= 4); for (unsigned i = 0; i < vectorNumElements; i++) { vals[i] = Builder.CreateExtractElement(val, i); mask |= (1 << i); } } else { vals[0] = val; mask = DXIL::kCompMask_X; } Constant *alignment = 
OP->GetI32Const(DL.getTypeAllocSize(Ty->getScalarType())); GenerateStructBufSt(handle, bufIdx, offset, pOverloadTy, OP, Builder, vals, mask, alignment); }; if (arraySize > 1) val = Builder.CreateExtractValue(val, 0); StElement(offset, val, Builder); if (arraySize > 1) { val = stInst->getValueOperand(); for (unsigned i = 1; i < arraySize; i++) { offset = Builder.CreateAdd(offset, eltSize); Value *eltVal = Builder.CreateExtractValue(val, i); StElement(offset, eltVal, Builder); } } } user->eraseFromParent(); } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(user)) { // Recurse users for (auto U = BCI->user_begin(); U != BCI->user_end();) { Value *BCIUser = *(U++); TranslateStructBufSubscriptUser(cast<Instruction>(BCIUser), handle, ResKind, bufIdx, baseOffset, status, OP, DL); } BCI->eraseFromParent(); } else if (PHINode *Phi = dyn_cast<PHINode>(user)) { if (Phi->getNumIncomingValues() != 1) { dxilutil::EmitErrorOnInstruction( Phi, "Phi not supported for buffer subscript"); return; } // Since the phi only has a single value we can safely process its // users to translate the subscript. These single-value phis are // inserted by the lcssa pass. for (auto U = Phi->user_begin(); U != Phi->user_end();) { Value *PhiUser = *(U++); TranslateStructBufSubscriptUser(cast<Instruction>(PhiUser), handle, ResKind, bufIdx, baseOffset, status, OP, DL); } Phi->eraseFromParent(); } else { // should only used by GEP GetElementPtrInst *GEP = cast<GetElementPtrInst>(user); Type *Ty = GEP->getType()->getPointerElementType(); Value *offset = dxilutil::GEPIdxToOffset(GEP, Builder, OP, DL); DXASSERT_LOCALVAR(Ty, offset->getType() == Type::getInt32Ty(Ty->getContext()), "else bitness is wrong"); offset = Builder.CreateAdd(offset, baseOffset); for (auto U = GEP->user_begin(); U != GEP->user_end();) { Value *GEPUser = *(U++); TranslateStructBufSubscriptUser(cast<Instruction>(GEPUser), handle, ResKind, bufIdx, offset, status, OP, DL); } // delete the inst GEP->eraseFromParent(); } } void TranslateStructBufSubscript(CallInst *CI, Value *handle, Value *status, hlsl::OP *OP, HLResource::Kind ResKind, const DataLayout &DL) { Value *subscriptIndex = CI->getArgOperand(HLOperandIndex::kSubscriptIndexOpIdx); Value *bufIdx = nullptr; Value *offset = nullptr; if (ResKind == HLResource::Kind::RawBuffer) { offset = subscriptIndex; } else { // StructuredBuffer, TypedBuffer, etc. bufIdx = subscriptIndex; offset = OP->GetU32Const(0); } for (auto U = CI->user_begin(); U != CI->user_end();) { Value *user = *(U++); TranslateStructBufSubscriptUser(cast<Instruction>(user), handle, ResKind, bufIdx, offset, status, OP, DL); } } } // namespace // HLSubscript. namespace { Value *TranslateTypedBufLoad(CallInst *CI, DXIL::ResourceKind RK, DXIL::ResourceClass RC, Value *handle, LoadInst *ldInst, IRBuilder<> &Builder, hlsl::OP *hlslOP, const DataLayout &DL) { ResLoadHelper ldHelper(CI, RK, RC, handle, IntrinsicOp::MOP_Load, /*bForSubscript*/ true); // Default sampleIdx for 2DMS textures. 
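// (for multisampled textures the mip-level operand carries the sample
// index, so the subscript form reads sample 0)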
if (RK == DxilResource::Kind::Texture2DMS || RK == DxilResource::Kind::Texture2DMSArray) ldHelper.mipLevel = hlslOP->GetU32Const(0); // use ldInst as retVal ldHelper.retVal = ldInst; TranslateLoad(ldHelper, RK, Builder, hlslOP, DL); // delete the ld ldInst->eraseFromParent(); return ldHelper.retVal; } Value *UpdateVectorElt(Value *VecVal, Value *EltVal, Value *EltIdx, unsigned vectorSize, Instruction *InsertPt) { IRBuilder<> Builder(InsertPt); if (ConstantInt *CEltIdx = dyn_cast<ConstantInt>(EltIdx)) { VecVal = Builder.CreateInsertElement(VecVal, EltVal, CEltIdx->getLimitedValue()); } else { BasicBlock *BB = InsertPt->getParent(); BasicBlock *EndBB = BB->splitBasicBlock(InsertPt); TerminatorInst *TI = BB->getTerminator(); IRBuilder<> SwitchBuilder(TI); LLVMContext &Ctx = InsertPt->getContext(); SwitchInst *Switch = SwitchBuilder.CreateSwitch(EltIdx, EndBB, vectorSize); TI->eraseFromParent(); Function *F = EndBB->getParent(); IRBuilder<> endSwitchBuilder(EndBB->begin()); Type *Ty = VecVal->getType(); PHINode *VecPhi = endSwitchBuilder.CreatePHI(Ty, vectorSize + 1); for (unsigned i = 0; i < vectorSize; i++) { BasicBlock *CaseBB = BasicBlock::Create(Ctx, "case", F, EndBB); Switch->addCase(SwitchBuilder.getInt32(i), CaseBB); IRBuilder<> CaseBuilder(CaseBB); Value *CaseVal = CaseBuilder.CreateInsertElement(VecVal, EltVal, i); VecPhi->addIncoming(CaseVal, CaseBB); CaseBuilder.CreateBr(EndBB); } VecPhi->addIncoming(VecVal, BB); VecVal = VecPhi; } return VecVal; } void TranslateDefaultSubscript(CallInst *CI, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { Value *ptr = CI->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx); hlsl::OP *hlslOP = &helper.hlslOP; // Resource ptr. Value *handle = ptr; DXIL::ResourceClass RC = pObjHelper->GetRC(handle); DXIL::ResourceKind RK = pObjHelper->GetRK(handle); Type *Ty = CI->getType()->getPointerElementType(); for (auto It = CI->user_begin(); It != CI->user_end();) { User *user = *(It++); Instruction *I = cast<Instruction>(user); IRBuilder<> Builder(I); if (LoadInst *ldInst = dyn_cast<LoadInst>(user)) { TranslateTypedBufLoad(CI, RK, RC, handle, ldInst, Builder, hlslOP, helper.dataLayout); } else if (StoreInst *stInst = dyn_cast<StoreInst>(user)) { Value *val = stInst->getValueOperand(); TranslateStore(RK, handle, val, CI->getArgOperand(HLOperandIndex::kStoreOffsetOpIdx), Builder, hlslOP); // delete the st stInst->eraseFromParent(); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) { // Must be vector type here. unsigned vectorSize = Ty->getVectorNumElements(); DXASSERT_NOMSG(GEP->getNumIndices() == 2); Use *GEPIdx = GEP->idx_begin(); GEPIdx++; Value *EltIdx = *GEPIdx; for (auto GEPIt = GEP->user_begin(); GEPIt != GEP->user_end();) { User *GEPUser = *(GEPIt++); if (StoreInst *SI = dyn_cast<StoreInst>(GEPUser)) { IRBuilder<> StBuilder(SI); // Generate Ld. LoadInst *tmpLd = StBuilder.CreateLoad(CI); Value *ldVal = TranslateTypedBufLoad( CI, RK, RC, handle, tmpLd, StBuilder, hlslOP, helper.dataLayout); // Update vector. ldVal = UpdateVectorElt(ldVal, SI->getValueOperand(), EltIdx, vectorSize, SI); // Generate St. // Reset insert point, UpdateVectorElt may move SI to different block. 
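// UpdateVectorElt lowers a dynamic lane index into a switch over all
// lanes (one insertelement per case, merged by a phi); that splits the
// basic block, which is why the insert point must be re-established on SI
// before emitting the store.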
StBuilder.SetInsertPoint(SI); TranslateStore(RK, handle, ldVal, CI->getArgOperand(HLOperandIndex::kStoreOffsetOpIdx), StBuilder, hlslOP); SI->eraseFromParent(); continue; } if (LoadInst *LI = dyn_cast<LoadInst>(GEPUser)) { IRBuilder<> LdBuilder(LI); // Generate tmp vector load with vector type & translate it LoadInst *tmpLd = LdBuilder.CreateLoad(CI); Value *ldVal = TranslateTypedBufLoad( CI, RK, RC, handle, tmpLd, LdBuilder, hlslOP, helper.dataLayout); // get the single element ldVal = GenerateVecEltFromGEP(ldVal, GEP, LdBuilder, /*bInsertLdNextToGEP*/ false); LI->replaceAllUsesWith(ldVal); LI->eraseFromParent(); continue; } // Invalid operations. Translated = false; dxilutil::EmitErrorOnInstruction(GEP, "Invalid operation on typed buffer."); return; } GEP->eraseFromParent(); } else { CallInst *userCall = cast<CallInst>(user); HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(userCall->getCalledFunction()); unsigned opcode = hlsl::GetHLOpcode(userCall); if (group == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp IOP = static_cast<IntrinsicOp>(opcode); if (RC == DXIL::ResourceClass::SRV) { // Invalid operations. Translated = false; dxilutil::EmitErrorOnInstruction(userCall, "Invalid operation on SRV."); return; } switch (IOP) { case IntrinsicOp::IOP_InterlockedAdd: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedAdd); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation(atomHelper, DXIL::AtomicBinOpCode::Add, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedAnd: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedAnd); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation(atomHelper, DXIL::AtomicBinOpCode::And, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedExchange: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedExchange); Type *opType = nullptr; PointerType *ptrType = dyn_cast<PointerType>( userCall->getArgOperand(HLOperandIndex::kInterlockedDestOpIndex) ->getType()); if (ptrType && ptrType->getElementType()->isFloatTy()) opType = Type::getInt32Ty(userCall->getContext()); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr, opType); TranslateAtomicBinaryOperation( atomHelper, DXIL::AtomicBinOpCode::Exchange, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedMax: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedMax); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation( atomHelper, DXIL::AtomicBinOpCode::IMax, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedMin: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedMin); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation( atomHelper, DXIL::AtomicBinOpCode::IMin, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedUMax: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedUMax); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation( atomHelper, DXIL::AtomicBinOpCode::UMax, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedUMin: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedUMin); 
AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation( atomHelper, DXIL::AtomicBinOpCode::UMin, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedOr: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedOr); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation(atomHelper, DXIL::AtomicBinOpCode::Or, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedXor: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedXor); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicBinOp, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicBinaryOperation(atomHelper, DXIL::AtomicBinOpCode::Xor, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedCompareStore: case IntrinsicOp::IOP_InterlockedCompareExchange: { ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedCompareExchange); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicCompareExchange, handle, helper.addr, /*offset*/ nullptr); TranslateAtomicCmpXChg(atomHelper, Builder, hlslOP); } break; case IntrinsicOp::IOP_InterlockedCompareStoreFloatBitwise: case IntrinsicOp::IOP_InterlockedCompareExchangeFloatBitwise: { Type *i32Ty = Type::getInt32Ty(userCall->getContext()); ResLoadHelper helper(CI, RK, RC, handle, IntrinsicOp::IOP_InterlockedCompareExchange); AtomicHelper atomHelper(userCall, DXIL::OpCode::AtomicCompareExchange, handle, helper.addr, /*offset*/ nullptr, i32Ty); TranslateAtomicCmpXChg(atomHelper, Builder, hlslOP); } break; default: DXASSERT(0, "invalid opcode"); break; } } else { DXASSERT(0, "invalid group"); } userCall->eraseFromParent(); } } } } // namespace void TranslateHLSubscript(CallInst *CI, HLSubscriptOpcode opcode, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper, bool &Translated) { if (CI->user_empty()) { Translated = true; return; } hlsl::OP *hlslOP = &helper.hlslOP; Value *ptr = CI->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx); if (opcode == HLSubscriptOpcode::CBufferSubscript) { dxilutil::MergeGepUse(CI); // Resource ptr. Value *handle = CI->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx); TranslateCBOperationsLegacy(handle, CI, hlslOP, helper.dxilTypeSys, helper.dataLayout, pObjHelper); Translated = true; return; } else if (opcode == HLSubscriptOpcode::DoubleSubscript) { // Resource ptr. 
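// A double subscript (e.g. the .mips or .sample accessors) carries both
// the mip level and the coordinate on a single HL call and must have
// exactly one load or store user.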
Value *handle = ptr; DXIL::ResourceKind RK = pObjHelper->GetRK(handle); Value *coord = CI->getArgOperand(HLOperandIndex::kSubscriptIndexOpIdx); Value *mipLevel = CI->getArgOperand(HLOperandIndex::kDoubleSubscriptMipLevelOpIdx); auto U = CI->user_begin(); DXASSERT(CI->hasOneUse(), "subscript should only have one use"); IRBuilder<> Builder(CI); if (LoadInst *ldInst = dyn_cast<LoadInst>(*U)) { ResLoadHelper ldHelper(ldInst, handle, coord, mipLevel); TranslateLoad(ldHelper, RK, Builder, hlslOP, helper.dataLayout); ldInst->eraseFromParent(); } else { StoreInst *stInst = cast<StoreInst>(*U); Value *val = stInst->getValueOperand(); TranslateStore(RK, handle, val, CI->getArgOperand(HLOperandIndex::kStoreOffsetOpIdx), Builder, hlslOP, mipLevel); stInst->eraseFromParent(); } Translated = true; return; } else { Type *HandleTy = hlslOP->GetHandleType(); if (ptr->getType() == hlslOP->GetNodeRecordHandleType()) { DXASSERT(false, "Shouldn't get here, NodeRecord subscripts should have " "been lowered in LowerRecordAccessToGetNodeRecordPtr"); return; } if (ptr->getType() == HandleTy) { // Resource ptr. Value *handle = ptr; DXIL::ResourceKind RK = DxilResource::Kind::Invalid; Type *ObjTy = nullptr; Type *RetTy = nullptr; RK = pObjHelper->GetRK(handle); if (RK == DxilResource::Kind::Invalid) { Translated = false; return; } ObjTy = pObjHelper->GetResourceType(handle); RetTy = ObjTy->getStructElementType(0); Translated = true; if (DXIL::IsStructuredBuffer(RK)) { TranslateStructBufSubscript(CI, handle, /*status*/ nullptr, hlslOP, RK, helper.dataLayout); } else if (RetTy->isAggregateType() && RK == DxilResource::Kind::TypedBuffer) { TranslateStructBufSubscript(CI, handle, /*status*/ nullptr, hlslOP, RK, helper.dataLayout); // Clear offset for typed buf. for (auto User = handle->user_begin(); User != handle->user_end();) { CallInst *CI = cast<CallInst>(*(User++)); // Skip not lowered HL functions. if (hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()) != HLOpcodeGroup::NotHL) continue; switch (hlslOP->GetDxilOpFuncCallInst(CI)) { case DXIL::OpCode::BufferLoad: { CI->setArgOperand(DXIL::OperandIndex::kBufferLoadCoord1OpIdx, UndefValue::get(helper.i32Ty)); } break; case DXIL::OpCode::BufferStore: { CI->setArgOperand(DXIL::OperandIndex::kBufferStoreCoord1OpIdx, UndefValue::get(helper.i32Ty)); } break; case DXIL::OpCode::AtomicBinOp: { CI->setArgOperand(DXIL::OperandIndex::kAtomicBinOpCoord1OpIdx, UndefValue::get(helper.i32Ty)); } break; case DXIL::OpCode::AtomicCompareExchange: { CI->setArgOperand(DXIL::OperandIndex::kAtomicCmpExchangeCoord1OpIdx, UndefValue::get(helper.i32Ty)); } break; case DXIL::OpCode::RawBufferLoad: { // Structured buffer inside a typed buffer must be converted to // typed buffer load. Typed buffer load is equivalent to raw buffer // load, except there is no mask. 
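// The rewrite below keeps the handle and index operands, drops the mask
// and alignment operands, and leaves the BufferLoad offset undef.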
StructType *STy = cast<StructType>(CI->getFunctionType()->getReturnType()); Type *ETy = STy->getElementType(0); SmallVector<Value *, 4> Args; Args.emplace_back( hlslOP->GetI32Const((unsigned)DXIL::OpCode::BufferLoad)); Args.emplace_back(CI->getArgOperand(1)); // handle Args.emplace_back(CI->getArgOperand(2)); // index Args.emplace_back(UndefValue::get(helper.i32Ty)); // offset IRBuilder<> builder(CI); Function *newFunction = hlslOP->GetOpFunc(DXIL::OpCode::BufferLoad, ETy); CallInst *newCall = builder.CreateCall(newFunction, Args); CI->replaceAllUsesWith(newCall); CI->eraseFromParent(); } break; default: DXASSERT(0, "Invalid operation on resource handle"); break; } } } else { TranslateDefaultSubscript(CI, helper, pObjHelper, Translated); } return; } } Value *basePtr = CI->getArgOperand(HLOperandIndex::kMatSubscriptMatOpIdx); if (IsLocalVariablePtr(basePtr) || IsSharedMemPtr(basePtr)) { // Translating a matrix into a vector of arrays for shared memory or a local variable should have been done in HLMatrixLowerPass. DXASSERT_NOMSG(0); Translated = true; return; } // Other cases should be taken care of in TranslateStructBufSubscript or TranslateCBOperations. Translated = false; return; } void TranslateSubscriptOperation(Function *F, HLOperationLowerHelper &helper, HLObjectOperationLowerHelper *pObjHelper) { for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); unsigned opcode = GetHLOpcode(CI); bool Translated = true; TranslateHLSubscript(CI, static_cast<HLSubscriptOpcode>(opcode), helper, pObjHelper, Translated); if (Translated) { // delete the call DXASSERT(CI->use_empty(), "else TranslateHLSubscript didn't replace/erase uses"); CI->eraseFromParent(); } } } // Create a BitCast if ptr; otherwise, create an alloca of the new type, write V to a bitcast of the alloca, and return a load from the alloca. If bOrigAllocaTy is true: create an alloca of the old type instead, write to the alloca, and return a load from a bitcast of the alloca. static Instruction *BitCastValueOrPtr(Value *V, Instruction *Insert, Type *Ty, bool bOrigAllocaTy = false, const Twine &Name = "") { IRBuilder<> Builder(Insert); if (Ty->isPointerTy()) { // If pointer, we can bitcast directly return cast<Instruction>(Builder.CreateBitCast(V, Ty, Name)); } else { // If value, we have to alloca, store to bitcast ptr, and load IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(Insert)); Type *allocaTy = bOrigAllocaTy ? V->getType() : Ty; Type *otherTy = bOrigAllocaTy ? Ty : V->getType(); Instruction *allocaInst = AllocaBuilder.CreateAlloca(allocaTy); Instruction *bitCast = cast<Instruction>( Builder.CreateBitCast(allocaInst, otherTy->getPointerTo())); Builder.CreateStore(V, bOrigAllocaTy ? allocaInst : bitCast); return Builder.CreateLoad(bOrigAllocaTy ? 
bitCast : allocaInst, Name); } } static Instruction *CreateTransposeShuffle(IRBuilder<> &Builder, Value *vecVal, unsigned toRows, unsigned toCols) { SmallVector<int, 16> castMask(toCols * toRows); unsigned idx = 0; for (unsigned r = 0; r < toRows; r++) for (unsigned c = 0; c < toCols; c++) castMask[idx++] = c * toRows + r; return cast<Instruction>( Builder.CreateShuffleVector(vecVal, vecVal, castMask)); } void TranslateHLBuiltinOperation(Function *F, HLOperationLowerHelper &helper, hlsl::HLOpcodeGroup group, HLObjectOperationLowerHelper *pObjHelper) { if (group == HLOpcodeGroup::HLIntrinsic) { // map to dxil operations for (auto U = F->user_begin(); U != F->user_end();) { Value *User = *(U++); if (!isa<Instruction>(User)) continue; // must be call inst CallInst *CI = cast<CallInst>(User); // Keep the instruction to lower by other function. bool Translated = true; TranslateBuiltinIntrinsic(CI, helper, pObjHelper, Translated); if (Translated) { // delete the call DXASSERT(CI->use_empty(), "else TranslateBuiltinIntrinsic didn't replace/erase uses"); CI->eraseFromParent(); } } } else { if (group == HLOpcodeGroup::HLMatLoadStore) { // Both ld/st use arg1 for the pointer. Type *PtrTy = F->getFunctionType()->getParamType(HLOperandIndex::kMatLoadPtrOpIdx); if (PtrTy->getPointerAddressSpace() == DXIL::kTGSMAddrSpace) { // Translate matrix into vector of array for shared memory // variable should be done in HLMatrixLowerPass. if (!F->user_empty()) F->getContext().emitError("Fail to lower matrix load/store."); } else if (PtrTy->getPointerAddressSpace() == DXIL::kDefaultAddrSpace) { // Default address space may be function argument in lib target if (!F->user_empty()) { for (auto U = F->user_begin(); U != F->user_end();) { Value *User = *(U++); if (!isa<Instruction>(User)) continue; // must be call inst CallInst *CI = cast<CallInst>(User); IRBuilder<> Builder(CI); HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLMatLoadStoreOpcode::ColMatStore: case HLMatLoadStoreOpcode::RowMatStore: { Value *vecVal = CI->getArgOperand(HLOperandIndex::kMatStoreValOpIdx); Value *matPtr = CI->getArgOperand(HLOperandIndex::kMatStoreDstPtrOpIdx); matPtr = SkipAddrSpaceCast(matPtr); unsigned addrSpace = cast<PointerType>(matPtr->getType())->getAddressSpace(); Value *castPtr = Builder.CreateBitCast( matPtr, vecVal->getType()->getPointerTo(addrSpace)); Builder.CreateStore(vecVal, castPtr); CI->eraseFromParent(); } break; case HLMatLoadStoreOpcode::ColMatLoad: case HLMatLoadStoreOpcode::RowMatLoad: { Value *matPtr = CI->getArgOperand(HLOperandIndex::kMatLoadPtrOpIdx); matPtr = SkipAddrSpaceCast(matPtr); unsigned addrSpace = cast<PointerType>(matPtr->getType())->getAddressSpace(); Value *castPtr = Builder.CreateBitCast( matPtr, CI->getType()->getPointerTo(addrSpace)); Value *vecVal = Builder.CreateLoad(castPtr); CI->replaceAllUsesWith(vecVal); CI->eraseFromParent(); } break; } } } } } else if (group == HLOpcodeGroup::HLCast) { // HLCast may be used on matrix value function argument in lib target if (!F->user_empty()) { for (auto U = F->user_begin(); U != F->user_end();) { Value *User = *(U++); if (!isa<Instruction>(User)) continue; // must be call inst CallInst *CI = cast<CallInst>(User); IRBuilder<> Builder(CI); HLCastOpcode opcode = static_cast<HLCastOpcode>(hlsl::GetHLOpcode(CI)); bool bTranspose = false; bool bColDest = false; switch (opcode) { case HLCastOpcode::RowMatrixToColMatrix: bColDest = true; LLVM_FALLTHROUGH; case HLCastOpcode::ColMatrixToRowMatrix: 
bTranspose = true; LLVM_FALLTHROUGH; case HLCastOpcode::ColMatrixToVecCast: case HLCastOpcode::RowMatrixToVecCast: { Value *matVal = CI->getArgOperand(HLOperandIndex::kInitFirstArgOpIdx); Value *vecVal = BitCastValueOrPtr(matVal, CI, CI->getType(), /*bOrigAllocaTy*/ false, matVal->getName()); if (bTranspose) { HLMatrixType MatTy = HLMatrixType::cast(matVal->getType()); unsigned row = MatTy.getNumRows(); unsigned col = MatTy.getNumColumns(); if (bColDest) std::swap(row, col); vecVal = CreateTransposeShuffle(Builder, vecVal, row, col); } CI->replaceAllUsesWith(vecVal); CI->eraseFromParent(); } break; } } } } else if (group == HLOpcodeGroup::HLSubscript) { TranslateSubscriptOperation(F, helper, pObjHelper); } // map to math function or llvm ir } } typedef std::unordered_map<llvm::Instruction *, llvm::Value *> HandleMap; static void TranslateHLExtension(Function *F, HLSLExtensionsCodegenHelper *helper, OP &hlslOp, HLObjectOperationLowerHelper &objHelper) { // Find all calls to the function F. // Store the calls in a vector for now, to be replaced in the loop below. // We use a two-step "find then replace" to avoid removing uses while // iterating. SmallVector<CallInst *, 8> CallsToReplace; for (User *U : F->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { CallsToReplace.push_back(CI); } } // Get the lowering strategy to use for this intrinsic. llvm::StringRef LowerStrategy = GetHLLowerStrategy(F); HLObjectExtensionLowerHelper extObjHelper(objHelper); ExtensionLowering lower(LowerStrategy, helper, hlslOp, extObjHelper); // Replace all calls that were successfully translated. for (CallInst *CI : CallsToReplace) { Value *Result = lower.Translate(CI); if (Result && Result != CI) { CI->replaceAllUsesWith(Result); CI->eraseFromParent(); } } } namespace hlsl { void TranslateBuiltinOperations( HLModule &HLM, HLSLExtensionsCodegenHelper *extCodegenHelper, std::unordered_set<Instruction *> &UpdateCounterSet) { HLOperationLowerHelper helper(HLM); HLObjectOperationLowerHelper objHelper = {HLM, UpdateCounterSet}; Module *M = HLM.GetModule(); SmallVector<Function *, 4> NonUniformResourceIndexIntrinsics; // generate dxil operation for (iplist<Function>::iterator F : M->getFunctionList()) { if (F->user_empty()) continue; if (!F->isDeclaration()) { continue; } hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); if (group == HLOpcodeGroup::NotHL) { // Nothing to do. continue; } if (group == HLOpcodeGroup::HLExtIntrinsic) { TranslateHLExtension(F, extCodegenHelper, helper.hlslOP, objHelper); continue; } if (group == HLOpcodeGroup::HLIntrinsic) { CallInst *CI = cast<CallInst>(*F->user_begin()); // must be call inst unsigned opcode = hlsl::GetHLOpcode(CI); if (opcode == (unsigned)IntrinsicOp::IOP_NonUniformResourceIndex) { NonUniformResourceIndexIntrinsics.push_back(F); continue; } } TranslateHLBuiltinOperation(F, helper, group, &objHelper); } // Translate these last so the values placed in NonUniformSet are still valid.
if (!NonUniformResourceIndexIntrinsics.empty()) { for (auto F : NonUniformResourceIndexIntrinsics) { TranslateHLBuiltinOperation(F, helper, HLOpcodeGroup::HLIntrinsic, &objHelper); } } } void EmitGetNodeRecordPtrAndUpdateUsers(HLOperationLowerHelper &helper, CallInst *CI, Value *ArrayIndex) { IRBuilder<> Builder(CI); Value *opArg = nullptr; Value *Handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); opArg = Builder.getInt32((unsigned)DXIL::OpCode::GetNodeRecordPtr); StructType *origRecordUDT = cast<StructType>(cast<PointerType>(CI->getType())->getElementType()); Type *getNodeRecordPtrRT = origRecordUDT; // Translate node record type here auto findIt = helper.loweredTypes.find(origRecordUDT); if (findIt != helper.loweredTypes.end()) { getNodeRecordPtrRT = findIt->second; } else { getNodeRecordPtrRT = GetLoweredUDT(origRecordUDT, &helper.dxilTypeSys); if (origRecordUDT != getNodeRecordPtrRT) helper.loweredTypes[origRecordUDT] = getNodeRecordPtrRT; } getNodeRecordPtrRT = getNodeRecordPtrRT->getPointerTo(DXIL::kNodeRecordAddrSpace); Function *getNodeRecordPtr = helper.hlslOP.GetOpFunc( DXIL::OpCode::GetNodeRecordPtr, getNodeRecordPtrRT); Value *args[] = {opArg, Handle, ArrayIndex}; Value *NodeRecordPtr = Builder.CreateCall(getNodeRecordPtr, args); ReplaceUsesForLoweredUDT(CI, NodeRecordPtr); } void LowerRecordAccessToGetNodeRecordPtr(HLModule &HLM) { Module *M = HLM.GetModule(); HLOperationLowerHelper helper(HLM); for (iplist<Function>::iterator F : M->getFunctionList()) { if (F->user_empty()) continue; hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); if (group == HLOpcodeGroup::HLSubscript) { for (auto U = F->user_begin(); U != F->user_end();) { Value *User = *(U++); if (!isa<Instruction>(User)) continue; // must be call inst CallInst *CI = cast<CallInst>(User); HLSubscriptOpcode opcode = static_cast<HLSubscriptOpcode>(hlsl::GetHLOpcode(CI)); if (opcode != HLSubscriptOpcode::DefaultSubscript) continue; hlsl::OP *OP = &helper.hlslOP; Value *Handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); if (Handle->getType() != OP->GetNodeRecordHandleType()) { continue; } Value *Index = CI->getNumArgOperands() > 2 ? CI->getArgOperand(2) : ConstantInt::get(helper.i32Ty, 0); EmitGetNodeRecordPtrAndUpdateUsers(helper, CI, Index); CI->eraseFromParent(); } } } } } // namespace hlsl
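// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original file): the
// LowerRecordAccessToGetNodeRecordPtr pass above rewrites an HL default
// subscript on a node record handle into the GetNodeRecordPtr DXIL op.
// Schematically, with mangled overload names abbreviated, %rec standing for
// the lowered record UDT, and N for DXIL::kNodeRecordAddrSpace, a call like
//
//   %p = call %rec* @"dx.hl.subscript.[...]"(i32 <defaultSubscript>,
//            %dx.types.NodeRecordHandle %h, i32 %idx)
//
// becomes
//
//   %p = call %rec addrspace(N)* @dx.op.getNodeRecordPtr.[...](i32 <opcode>,
//            %dx.types.NodeRecordHandle %h, i32 %idx)
//
// after which ReplaceUsesForLoweredUDT rewires users of the old pointer.
// ---------------------------------------------------------------------------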
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilGenerationPass.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilGenerationPass.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // DxilGenerationPass implementation. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilGenerationPass.h" #include "HLSignatureLower.h" #include "dxc/DXIL/DxilEntryProps.h" #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperationLower.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HLSL/HLSLExtensionsCodegenHelper.h" #include "dxc/Support/Global.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <unordered_map> #include <unordered_set> #include <vector> using namespace llvm; using namespace hlsl; // TODO: use hlsl namespace for the most of this file. namespace { void SimplifyGlobalSymbol(GlobalVariable *GV) { Type *Ty = GV->getType()->getElementType(); if (!Ty->isArrayTy()) { // Make sure only 1 load of GV in each function. std::unordered_map<Function *, Instruction *> handleMapOnFunction; for (User *U : GV->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { Function *F = LI->getParent()->getParent(); auto it = handleMapOnFunction.find(F); if (it == handleMapOnFunction.end()) { LI->moveBefore(dxilutil::FindAllocaInsertionPt(F)); handleMapOnFunction[F] = LI; } else { LI->replaceAllUsesWith(it->second); } } } } } void InitResourceBase(const DxilResourceBase *pSource, DxilResourceBase *pDest) { DXASSERT_NOMSG(pSource->GetClass() == pDest->GetClass()); pDest->SetKind(pSource->GetKind()); pDest->SetID(pSource->GetID()); pDest->SetSpaceID(pSource->GetSpaceID()); pDest->SetLowerBound(pSource->GetLowerBound()); pDest->SetRangeSize(pSource->GetRangeSize()); pDest->SetGlobalSymbol(pSource->GetGlobalSymbol()); pDest->SetGlobalName(pSource->GetGlobalName()); pDest->SetHandle(pSource->GetHandle()); pDest->SetHLSLType(pSource->GetHLSLType()); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(pSource->GetGlobalSymbol())) SimplifyGlobalSymbol(GV); } void InitResource(const DxilResource *pSource, DxilResource *pDest) { pDest->SetCompType(pSource->GetCompType()); pDest->SetSamplerFeedbackType(pSource->GetSamplerFeedbackType()); pDest->SetSampleCount(pSource->GetSampleCount()); pDest->SetElementStride(pSource->GetElementStride()); pDest->SetGloballyCoherent(pSource->IsGloballyCoherent()); pDest->SetHasCounter(pSource->HasCounter()); pDest->SetRW(pSource->IsRW()); pDest->SetROV(pSource->IsROV()); InitResourceBase(pSource, pDest); } void InitDxilModuleFromHLModule(HLModule &H, DxilModule &M, bool HasDebugInfo) { // Subsystems. unsigned ValMajor, ValMinor; H.GetValidatorVersion(ValMajor, ValMinor); M.SetValidatorVersion(ValMajor, ValMinor); M.SetShaderModel(H.GetShaderModel(), H.GetHLOptions().bUseMinPrecision); M.SetForceZeroStoreLifetimes(H.GetHLOptions().bForceZeroStoreLifetimes); // Entry function. 
if (!M.GetShaderModel()->IsLib()) { Function *EntryFn = H.GetEntryFunction(); M.SetEntryFunction(EntryFn); M.SetEntryFunctionName(H.GetEntryFunctionName()); } std::vector<GlobalVariable *> &LLVMUsed = M.GetLLVMUsed(); // Resources for (auto &&C : H.GetCBuffers()) { auto b = llvm::make_unique<DxilCBuffer>(); InitResourceBase(C.get(), b.get()); b->SetSize(C->GetSize()); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(b->GetGlobalSymbol())) LLVMUsed.emplace_back(GV); M.AddCBuffer(std::move(b)); } for (auto &&C : H.GetUAVs()) { auto b = llvm::make_unique<DxilResource>(); InitResource(C.get(), b.get()); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(b->GetGlobalSymbol())) LLVMUsed.emplace_back(GV); M.AddUAV(std::move(b)); } for (auto &&C : H.GetSRVs()) { auto b = llvm::make_unique<DxilResource>(); InitResource(C.get(), b.get()); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(b->GetGlobalSymbol())) LLVMUsed.emplace_back(GV); M.AddSRV(std::move(b)); } for (auto &&C : H.GetSamplers()) { auto b = llvm::make_unique<DxilSampler>(); InitResourceBase(C.get(), b.get()); b->SetSamplerKind(C->GetSamplerKind()); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(b->GetGlobalSymbol())) LLVMUsed.emplace_back(GV); M.AddSampler(std::move(b)); } // Signatures. M.ResetSerializedRootSignature(H.GetSerializedRootSignature()); // Subobjects. M.ResetSubobjects(H.ReleaseSubobjects()); // Shader properties. // bool m_bDisableOptimizations; M.SetDisableOptimization(H.GetHLOptions().bDisableOptimizations); M.SetLegacyResourceReservation(H.GetHLOptions().bLegacyResourceReservation); // bool m_bDisableMathRefactoring; // bool m_bEnableDoublePrecision; // bool m_bEnableDoubleExtensions; // M.CollectShaderFlags(); // bool m_bForceEarlyDepthStencil; // bool m_bEnableRawAndStructuredBuffers; // bool m_bEnableMSAD; // M.m_ShaderFlags.SetAllResourcesBound(H.GetHLOptions().bAllResourcesBound); // DXIL type system. M.ResetTypeSystem(H.ReleaseTypeSystem()); // Dxil OP. M.ResetOP(H.ReleaseOP()); // Keep llvm used. M.EmitLLVMUsed(); M.SetAllResourcesBound(H.GetHLOptions().bAllResourcesBound); M.SetResMayAlias(H.GetHLOptions().bResMayAlias); M.SetAutoBindingSpace(H.GetAutoBindingSpace()); // Update Validator Version M.UpgradeToMinValidatorVersion(); } class DxilGenerationPass : public ModulePass { HLModule *m_pHLModule; bool m_HasDbgInfo; HLSLExtensionsCodegenHelper *m_extensionsCodegenHelper; public: static char ID; // Pass identification, replacement for typeid explicit DxilGenerationPass(bool NoOpt = false) : ModulePass(ID), m_pHLModule(nullptr), m_extensionsCodegenHelper(nullptr), NotOptimized(NoOpt) {} StringRef getPassName() const override { return "DXIL Generator"; } void SetExtensionsHelper(HLSLExtensionsCodegenHelper *helper) { m_extensionsCodegenHelper = helper; } bool runOnModule(Module &M) override { m_pHLModule = &M.GetOrCreateHLModule(); const ShaderModel *SM = m_pHLModule->GetShaderModel(); // Load up debug information, to cross-reference values and the instructions // used to load them. m_HasDbgInfo = hasDebugInfo(M); // EntrySig for shader functions. 
DxilEntryPropsMap EntryPropsMap; if (!SM->IsLib()) { Function *EntryFn = m_pHLModule->GetEntryFunction(); if (!m_pHLModule->HasDxilFunctionProps(EntryFn)) { llvm_unreachable("Entry function doesn't have any properties."); return false; } DxilFunctionProps &props = m_pHLModule->GetDxilFunctionProps(EntryFn); std::unique_ptr<DxilEntryProps> pProps = llvm::make_unique<DxilEntryProps>( props, m_pHLModule->GetHLOptions().bUseMinPrecision); HLSignatureLower sigLower(m_pHLModule->GetEntryFunction(), *m_pHLModule, pProps->sig); sigLower.Run(); EntryPropsMap[EntryFn] = std::move(pProps); } else { for (auto It = M.begin(); It != M.end();) { Function &F = *(It++); // Lower signature for each graphics or compute entry function. if (m_pHLModule->HasDxilFunctionProps(&F)) { DxilFunctionProps &props = m_pHLModule->GetDxilFunctionProps(&F); std::unique_ptr<DxilEntryProps> pProps = llvm::make_unique<DxilEntryProps>( props, m_pHLModule->GetHLOptions().bUseMinPrecision); if (m_pHLModule->IsGraphicsShader(&F) || m_pHLModule->IsComputeShader(&F) || m_pHLModule->IsNodeShader(&F)) { HLSignatureLower sigLower(&F, *m_pHLModule, pProps->sig); // TODO: BUG: This will lower patch constant function sigs twice if // used by two hull shaders! sigLower.Run(); } EntryPropsMap[&F] = std::move(pProps); } } } std::unordered_set<Instruction *> UpdateCounterSet; LowerRecordAccessToGetNodeRecordPtr(*m_pHLModule); GenerateDxilOperations(M, UpdateCounterSet); GenerateDxilCBufferHandles(); std::unordered_map<CallInst *, Type *> HandleToResTypeMap; LowerHLCreateHandle(HandleToResTypeMap); MarkUpdateCounter(UpdateCounterSet); // LowerHLCreateHandle() should have translated HLCreateHandle to // CreateHandleForLib. Clean up HLCreateHandle functions. for (auto It = M.begin(); It != M.end();) { Function &F = *(It++); if (!F.isDeclaration()) { if (hlsl::GetHLOpcodeGroupByName(&F) == HLOpcodeGroup::HLCreateHandle) { if (F.user_empty()) { F.eraseFromParent(); } else { llvm_unreachable("Failed to lower createHandle."); } } } } // Translate precise on allocas into function call to keep the information // after mem2reg. The function calls will be removed after propagating the // precise attribute. TranslatePreciseAttribute(); // High-level metadata should now be turned into low-level metadata. DxilFunctionProps *pProps = nullptr; if (!SM->IsLib()) { pProps = &EntryPropsMap.begin()->second->props; } const bool SkipInit = true; hlsl::DxilModule &DxilMod = M.GetOrCreateDxilModule(SkipInit); InitDxilModuleFromHLModule(*m_pHLModule, DxilMod, m_HasDbgInfo); DxilMod.ResetEntryPropsMap(std::move(EntryPropsMap)); if (!SM->IsLib()) { DxilMod.SetShaderProperties(pProps); } HLModule::ClearHLMetadata(M); M.ResetHLModule(); if (SM->IsSM62Plus() && DxilMod.GetUseMinPrecision()) { TranslateMinPrecisionRawBuffer(DxilMod, HandleToResTypeMap); } // We now have a DXIL representation - record this. SetPauseResumePasses(M, "hlsl-dxilemit", "hlsl-dxilload"); (void)NotOptimized; // Dummy out unused member to silence warnings return true; } private: void MarkUpdateCounter(std::unordered_set<Instruction *> &UpdateCounterSet); // Generate DXIL cbuffer handles. void GenerateDxilCBufferHandles(); // change built-in functions into DXIL operations void GenerateDxilOperations(Module &M, std::unordered_set<Instruction *> &UpdateCounterSet); void LowerHLCreateHandle( std::unordered_map<CallInst *, Type *> &HandleToResTypeMap); // Translate precise attribute into HL function call.
void TranslatePreciseAttribute(); // Translate RawBufferLoad/RawBufferStore // For DXIL >= 1.2, if min precision is enabled, currently generation pass is // producing i16/f16 return type for min precisions. For rawBuffer, we will // change this so that min precisions are returning its actual scalar type // (i32/f32) and will be truncated to their corresponding types after loading // / before storing. void TranslateMinPrecisionRawBuffer( DxilModule &DM, std::unordered_map<CallInst *, Type *> &HandleToResTypeMap); // Input module is not optimized. bool NotOptimized; }; } // namespace namespace { void TranslateHLCreateHandle(Function *F, hlsl::OP &hlslOP) { Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::CreateHandleForLib); for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); Value *res = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); Value *newHandle = nullptr; IRBuilder<> Builder(CI); // Res could be ld/phi/select. Will be removed in // DxilLowerCreateHandleForLib. Function *createHandle = hlslOP.GetOpFunc(DXIL::OpCode::CreateHandleForLib, res->getType()); newHandle = Builder.CreateCall(createHandle, {opArg, res}); CI->replaceAllUsesWith(newHandle); if (res->user_empty()) { if (Instruction *I = dyn_cast<Instruction>(res)) I->eraseFromParent(); } CI->eraseFromParent(); } } void TranslateHLCreateNodeOutputHandle(Function *F, hlsl::OP &hlslOP) { for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); Value *idx = CI->getArgOperand(HLOperandIndex::kNodeOutputMetadataIDIdx); auto DxilOpcode = DXIL::OpCode::CreateNodeOutputHandle; Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::CreateNodeOutputHandle); IRBuilder<> Builder(CI); Function *createHandle = hlslOP.GetOpFunc(DxilOpcode, Builder.getVoidTy()); Value *newHandle = Builder.CreateCall(createHandle, {opArg, idx}); CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } } void TranslateHLIndexNodeHandle(Function *F, hlsl::OP &hlslOP) { for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; CallInst *CI = cast<CallInst>(user); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *arrayidx = CI->getArgOperand(HLOperandIndex::kIndexNodeHandleArrayIDIdx); auto DxilOpcode = DXIL::OpCode::IndexNodeHandle; Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::IndexNodeHandle); IRBuilder<> Builder(CI); Function *createHandle = hlslOP.GetOpFunc(DxilOpcode, Builder.getVoidTy()); Value *newHandle = Builder.CreateCall(createHandle, {opArg, handle, arrayidx}); CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } } void TranslateHLCreateNodeInputRecordHandle(Function *F, hlsl::OP &hlslOP) { for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be a call inst CallInst *CI = cast<CallInst>(user); Value *idx = CI->getArgOperand(HLOperandIndex::kNodeInputRecordMetadataIDIdx); auto DxilOpcode = DXIL::OpCode::CreateNodeInputRecordHandle; Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::CreateNodeInputRecordHandle); IRBuilder<> Builder(CI); Function *createHandle = hlslOP.GetOpFunc(DxilOpcode, Builder.getVoidTy()); Value *newHandle = Builder.CreateCall(createHandle, {opArg, idx}); CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } 
} void TranslateHLAnnotateNodeRecordHandle(Function *F, hlsl::OP &hlslOP) { Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::AnnotateNodeRecordHandle); for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *NP = CI->getArgOperand( HLOperandIndex::kAnnotateNodeRecordHandleNodeRecordPropIdx); IRBuilder<> Builder(CI); // put annotateHandle near the Handle it annotated. if (Instruction *I = dyn_cast<Instruction>(handle)) { if (isa<PHINode>(I)) Builder.SetInsertPoint(I->getParent()->getFirstInsertionPt()); else Builder.SetInsertPoint(I->getNextNode()); } else if (Argument *Arg = dyn_cast<Argument>(handle)) { Builder.SetInsertPoint( Arg->getParent()->getEntryBlock().getFirstInsertionPt()); } Function *annotateHandle = hlslOP.GetOpFunc( DXIL::OpCode::AnnotateNodeRecordHandle, Builder.getVoidTy()); CallInst *newHandle = Builder.CreateCall(annotateHandle, {opArg, handle, NP}); CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } } void TranslateHLAnnotateHandle( Function *F, hlsl::OP &hlslOP, std::unordered_map<CallInst *, Type *> &HandleToResTypeMap) { Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::AnnotateHandle); for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *RP = CI->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx); Type *ResTy = CI->getArgOperand(HLOperandIndex::kAnnotateHandleResourceTypeOpIdx) ->getType(); IRBuilder<> Builder(CI); // put annotateHandle near the Handle it annotated. if (Instruction *I = dyn_cast<Instruction>(handle)) { if (isa<PHINode>(I)) { Builder.SetInsertPoint(I->getParent()->getFirstInsertionPt()); } else { Builder.SetInsertPoint(I->getNextNode()); } } else if (Argument *Arg = dyn_cast<Argument>(handle)) { Builder.SetInsertPoint( Arg->getParent()->getEntryBlock().getFirstInsertionPt()); } Function *annotateHandle = hlslOP.GetOpFunc(DXIL::OpCode::AnnotateHandle, Builder.getVoidTy()); CallInst *newHandle = Builder.CreateCall(annotateHandle, {opArg, handle, RP}); HandleToResTypeMap[newHandle] = ResTy; CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } } void TranslateHLAnnotateNodeHandle(Function *F, hlsl::OP &hlslOP) { Value *opArg = hlslOP.GetU32Const((unsigned)DXIL::OpCode::AnnotateNodeHandle); for (auto U = F->user_begin(); U != F->user_end();) { Value *user = *(U++); if (!isa<Instruction>(user)) continue; // must be call inst CallInst *CI = cast<CallInst>(user); Value *handle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Value *NP = CI->getArgOperand(HLOperandIndex::kAnnotateNodeHandleNodePropIdx); IRBuilder<> Builder(CI); // put AnnotateNodeHandle near the Handle it annotated. 
if (Instruction *I = dyn_cast<Instruction>(handle)) { if (isa<PHINode>(I)) Builder.SetInsertPoint(I->getParent()->getFirstInsertionPt()); else Builder.SetInsertPoint(I->getNextNode()); } else if (Argument *Arg = dyn_cast<Argument>(handle)) { Builder.SetInsertPoint( Arg->getParent()->getEntryBlock().getFirstInsertionPt()); } Function *annotateHandle = hlslOP.GetOpFunc(DXIL::OpCode::AnnotateNodeHandle, Builder.getVoidTy()); CallInst *newHandle = Builder.CreateCall(annotateHandle, {opArg, handle, NP}); CI->replaceAllUsesWith(newHandle); CI->eraseFromParent(); } } void TranslateHLCastHandleToRes(Function *F, hlsl::OP &hlslOP, const llvm::DataLayout &DL) { for (auto U = F->user_begin(); U != F->user_end();) { Value *User = *(U++); if (!isa<Instruction>(User)) continue; // must be call inst CallInst *CI = cast<CallInst>(User); IRBuilder<> Builder(CI); HLCastOpcode opcode = static_cast<HLCastOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLCastOpcode::HandleToNodeOutputCast: { // Do Nothing for now // Perhaps we need to replace the recordtohandle cast users // with the handle argument here. } break; case HLCastOpcode::NodeOutputToHandleCast: { Value *NodeOutputHandle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Constant *C = dyn_cast<Constant>(NodeOutputHandle); if (C && C->isZeroValue()) { NodeOutputHandle = Constant::getNullValue(hlslOP.GetNodeHandleType()); } else if (auto *CastI = dyn_cast<CallInst>(NodeOutputHandle)) { DXASSERT_NOMSG(hlsl::GetHLOpcodeGroup(CastI->getCalledFunction()) == HLOpcodeGroup::HLCast); NodeOutputHandle = CastI->getArgOperand(HLOperandIndex::kHandleOpIdx); } CI->replaceAllUsesWith(NodeOutputHandle); } break; case HLCastOpcode::NodeRecordToHandleCast: { Value *OutputRecordHandle = CI->getArgOperand(HLOperandIndex::kHandleOpIdx); Constant *C = dyn_cast<Constant>(OutputRecordHandle); if (C && C->isZeroValue()) { OutputRecordHandle = Constant::getNullValue(hlslOP.GetNodeRecordHandleType()); } else if (auto *CastI = dyn_cast<CallInst>(OutputRecordHandle)) { DXASSERT_NOMSG(hlsl::GetHLOpcodeGroup(CastI->getCalledFunction()) == HLOpcodeGroup::HLCast); OutputRecordHandle = CastI->getArgOperand(HLOperandIndex::kHandleOpIdx); } CI->replaceAllUsesWith(OutputRecordHandle); } break; case HLCastOpcode::HandleToResCast: { Value *Handle = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx); for (auto HandleU = CI->user_begin(); HandleU != CI->user_end();) { Value *HandleUser = *(HandleU++); CallInst *HandleCI = dyn_cast<CallInst>(HandleUser); if (!HandleCI) continue; hlsl::HLOpcodeGroup handleGroup = hlsl::GetHLOpcodeGroup(HandleCI->getCalledFunction()); if (handleGroup == HLOpcodeGroup::HLCreateHandle) { HandleCI->replaceAllUsesWith(Handle); HandleCI->eraseFromParent(); } } } break; } if (CI->user_empty()) { CI->eraseFromParent(); } } } } // namespace void DxilGenerationPass::LowerHLCreateHandle( std::unordered_map<CallInst *, Type *> &HandleToResTypeMap) { Module *M = m_pHLModule->GetModule(); hlsl::OP &hlslOP = *m_pHLModule->GetOP(); // Lower cast handle to res/node used by hl.createhandle. 
for (iplist<Function>::iterator F : M->getFunctionList()) { if (F->user_empty()) continue; hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); if (group == HLOpcodeGroup::HLCast) { auto DL = M->getDataLayout(); TranslateHLCastHandleToRes(F, hlslOP, DL); } } // generate dxil operation for (iplist<Function>::iterator F : M->getFunctionList()) { if (F->user_empty()) continue; hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); switch (group) { default: break; case HLOpcodeGroup::HLCreateHandle: TranslateHLCreateHandle(F, hlslOP); break; case HLOpcodeGroup::HLCreateNodeOutputHandle: TranslateHLCreateNodeOutputHandle(F, hlslOP); break; case HLOpcodeGroup::HLIndexNodeHandle: TranslateHLIndexNodeHandle(F, hlslOP); break; case HLOpcodeGroup::HLCreateNodeInputRecordHandle: TranslateHLCreateNodeInputRecordHandle(F, hlslOP); break; case HLOpcodeGroup::HLAnnotateHandle: TranslateHLAnnotateHandle(F, hlslOP, HandleToResTypeMap); break; case HLOpcodeGroup::HLAnnotateNodeHandle: TranslateHLAnnotateNodeHandle(F, hlslOP); break; case HLOpcodeGroup::HLAnnotateNodeRecordHandle: TranslateHLAnnotateNodeRecordHandle(F, hlslOP); break; } } } static void MarkUavUpdateCounter(Value *LoadOrGEP, DxilResource &res, std::unordered_set<Instruction *> &UpdateCounterSet) { if (LoadInst *ldInst = dyn_cast<LoadInst>(LoadOrGEP)) { if (UpdateCounterSet.count(ldInst)) { DXASSERT_NOMSG(res.GetClass() == DXIL::ResourceClass::UAV); res.SetHasCounter(true); } } else { DXASSERT(dyn_cast<GEPOperator>(LoadOrGEP) != nullptr, "else AddOpcodeParamForIntrinsic in CodeGen did not patch uses " "to only have ld/st refer to temp object"); GEPOperator *GEP = cast<GEPOperator>(LoadOrGEP); for (auto GEPU : GEP->users()) { MarkUavUpdateCounter(GEPU, res, UpdateCounterSet); } } } static void MarkUavUpdateCounter(DxilResource &res, std::unordered_set<Instruction *> &UpdateCounterSet) { Value *V = res.GetGlobalSymbol(); for (auto U = V->user_begin(), E = V->user_end(); U != E;) { User *user = *(U++); // Skip unused user. if (user->user_empty()) continue; MarkUavUpdateCounter(user, res, UpdateCounterSet); } } static void MarkUavUpdateCounterForDynamicResource(CallInst &createHdlFromHeap, const ShaderModel &SM) { for (User *U : createHdlFromHeap.users()) { CallInst *CI = dyn_cast<CallInst>(U); if (!CI) continue; DxilInst_AnnotateHandle annotHdl(CI); if (!annotHdl) continue; auto RP = hlsl::resource_helper::loadPropsFromAnnotateHandle(annotHdl, SM); RP.Basic.SamplerCmpOrHasCounter = true; Value *originRP = annotHdl.get_props(); Value *updatedRP = hlsl::resource_helper::getAsConstant(RP, originRP->getType(), SM); annotHdl.set_props(updatedRP); } } void DxilGenerationPass::MarkUpdateCounter( std::unordered_set<Instruction *> &UpdateCounterSet) { for (size_t i = 0; i < m_pHLModule->GetUAVs().size(); i++) { HLResource &UAV = m_pHLModule->GetUAV(i); MarkUavUpdateCounter(UAV, UpdateCounterSet); } auto *hlslOP = m_pHLModule->GetOP(); if (hlslOP->IsDxilOpUsed(DXIL::OpCode::CreateHandleFromHeap)) { const ShaderModel *pSM = m_pHLModule->GetShaderModel(); Function *hdlFromHeap = hlslOP->GetOpFunc(DXIL::OpCode::CreateHandleFromHeap, Type::getVoidTy(m_pHLModule->GetCtx())); for (User *U : hdlFromHeap->users()) { CallInst *CI = cast<CallInst>(U); if (UpdateCounterSet.count(CI) == 0) continue; MarkUavUpdateCounterForDynamicResource(*CI, *pSM); } } } void DxilGenerationPass::GenerateDxilCBufferHandles() { // For CBuffer, handle are mapped to HLCreateHandle. 
OP *hlslOP = m_pHLModule->GetOP(); Value *opArg = hlslOP->GetU32Const((unsigned)OP::OpCode::CreateHandleForLib); LLVMContext &Ctx = hlslOP->GetCtx(); Value *zeroIdx = hlslOP->GetU32Const(0); for (size_t i = 0; i < m_pHLModule->GetCBuffers().size(); i++) { DxilCBuffer &CB = m_pHLModule->GetCBuffer(i); GlobalVariable *GV = dyn_cast<GlobalVariable>(CB.GetGlobalSymbol()); if (GV == nullptr) continue; // Remove GEP created in HLObjectOperationLowerHelper::UniformCbPtr. GV->removeDeadConstantUsers(); std::string handleName = std::string(GV->getName()); DIVariable *DIV = nullptr; DILocation *DL = nullptr; if (m_HasDbgInfo) { DebugInfoFinder &Finder = m_pHLModule->GetOrCreateDebugInfoFinder(); DIV = dxilutil::FindGlobalVariableDebugInfo(GV, Finder); if (DIV) // TODO: how to get col? DL = DILocation::get(Ctx, DIV->getLine(), 1, DIV->getScope()); } if (CB.GetRangeSize() == 1 && !GV->getType()->getElementType()->isArrayTy()) { Function *createHandle = hlslOP->GetOpFunc( OP::OpCode::CreateHandleForLib, GV->getType()->getElementType()); for (auto U = GV->user_begin(); U != GV->user_end();) { // Must HLCreateHandle. CallInst *CI = cast<CallInst>(*(U++)); // Put createHandle to entry block. IRBuilder<> Builder(dxilutil::FirstNonAllocaInsertionPt(CI)); Value *V = Builder.CreateLoad(GV); CallInst *handle = Builder.CreateCall(createHandle, {opArg, V}, handleName); if (m_HasDbgInfo) { // TODO: add debug info. // handle->setDebugLoc(DL); (void)(DL); } CI->replaceAllUsesWith(handle); CI->eraseFromParent(); } } else { PointerType *Ty = GV->getType(); Type *EltTy = Ty->getElementType()->getArrayElementType()->getPointerTo( Ty->getAddressSpace()); Function *createHandle = hlslOP->GetOpFunc( OP::OpCode::CreateHandleForLib, EltTy->getPointerElementType()); for (auto U = GV->user_begin(); U != GV->user_end();) { // Must HLCreateHandle. CallInst *CI = cast<CallInst>(*(U++)); IRBuilder<> Builder(CI); Value *CBIndex = CI->getArgOperand(HLOperandIndex::kCreateHandleIndexOpIdx); if (isa<ConstantInt>(CBIndex)) { // Put createHandle to entry block for const index. Builder.SetInsertPoint(dxilutil::FirstNonAllocaInsertionPt(CI)); } // Add GEP for cbv array use. Value *GEP = Builder.CreateGEP(GV, {zeroIdx, CBIndex}); if (DxilMDHelper::IsMarkedNonUniform(CI)) { DxilMDHelper::MarkNonUniform(cast<Instruction>(GEP)); } Value *V = Builder.CreateLoad(GEP); CallInst *handle = Builder.CreateCall(createHandle, {opArg, V}, handleName); CI->replaceAllUsesWith(handle); CI->eraseFromParent(); } } } } void DxilGenerationPass::GenerateDxilOperations( Module &M, std::unordered_set<Instruction *> &UpdateCounterSet) { // remove all functions except entry function Function *entry = m_pHLModule->GetEntryFunction(); const ShaderModel *pSM = m_pHLModule->GetShaderModel(); Function *patchConstantFunc = nullptr; if (pSM->IsHS()) { DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(entry); patchConstantFunc = funcProps.ShaderProps.HS.patchConstantFunc; } if (!pSM->IsLib()) { for (auto F = M.begin(); F != M.end();) { Function *func = F++; if (func->isDeclaration()) continue; if (func == entry) continue; if (func == patchConstantFunc) continue; if (func->user_empty()) func->eraseFromParent(); } } TranslateBuiltinOperations(*m_pHLModule, m_extensionsCodegenHelper, UpdateCounterSet); // Remove unused HL Operation functions. 
std::vector<Function *> deadList; for (iplist<Function>::iterator F : M.getFunctionList()) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(F); if (group != HLOpcodeGroup::NotHL || F->isIntrinsic()) if (F->user_empty()) deadList.emplace_back(F); } for (Function *F : deadList) F->eraseFromParent(); } static void TranslatePreciseAttributeOnFunction(Function &F, Module &M) { BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function // Find allocas that have the precise attribute, by looking at all // instructions in the entry node for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) { Instruction *Inst = (I++); if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst)) { if (HLModule::HasPreciseAttributeWithMetadata(AI)) { HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(AI, M); } } else { DXASSERT(!HLModule::HasPreciseAttributeWithMetadata(Inst), "Only alloca can have precise metadata."); } } FastMathFlags FMF; FMF.setUnsafeAlgebra(); // Set fast math for all FPMathOperators. // Already set FastMath in options. But that only enables things like fadd. // Every instruction whose type is float can be cast to FPMathOperator. for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) { BasicBlock *BB = BBI; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { if (dyn_cast<FPMathOperator>(I)) { // Set precise fast math on those instructions that support it. if (DxilModule::PreservesFastMathFlags(I)) I->copyFastMathFlags(FMF); } } } } void DxilGenerationPass::TranslatePreciseAttribute() { bool bIEEEStrict = m_pHLModule->GetHLOptions().bIEEEStrict; if (bIEEEStrict) { // mark precise on dxil operations. Module &M = *m_pHLModule->GetModule(); for (Function &F : M) { if (!hlsl::OP::IsDxilOpFunc(&F)) continue; if (!F.getReturnType()->isFPOrFPVectorTy()) continue; for (User *U : F.users()) { Instruction *I = dyn_cast<Instruction>(U); if (!I) continue; IRBuilder<> B(I); HLModule::MarkPreciseAttributeOnValWithFunctionCall(I, B, M); } } return; } Module &M = *m_pHLModule->GetModule(); // TODO: If we do not inline every function: for a function that has call // sites both with and without precise arguments, we need to clone the // function to propagate precise for the precise call sites. // This should be done at CGMSHLSLRuntime::FinishCodeGen. if (m_pHLModule->GetShaderModel()->IsLib()) { // TODO: If all functions have been inlined, and unreferenced functions // removed, // it should make sense to run on all function bodies, // even when not processing a library.
for (Function &F : M.functions()) { if (!F.isDeclaration()) TranslatePreciseAttributeOnFunction(F, M); } } else { Function *EntryFn = m_pHLModule->GetEntryFunction(); TranslatePreciseAttributeOnFunction(*EntryFn, M); if (m_pHLModule->GetShaderModel()->IsHS()) { DxilFunctionProps &EntryQual = m_pHLModule->GetDxilFunctionProps(EntryFn); Function *patchConstantFunc = EntryQual.ShaderProps.HS.patchConstantFunc; TranslatePreciseAttributeOnFunction(*patchConstantFunc, M); } } } namespace { void ReplaceMinPrecisionRawBufferLoadByType(Function *F, Type *FromTy, Type *ToTy, OP *Op, const DataLayout &DL) { Function *newFunction = Op->GetOpFunc(DXIL::OpCode::RawBufferLoad, ToTy); for (auto FUser = F->user_begin(), FEnd = F->user_end(); FUser != FEnd;) { User *UserCI = *(FUser++); if (CallInst *CI = dyn_cast<CallInst>(UserCI)) { IRBuilder<> CIBuilder(CI); SmallVector<Value *, 5> newFuncArgs; // opcode, handle, index, elementOffset, mask // Compiler is generating correct element offset even for min precision // types So no need to recalculate here for (unsigned i = 0; i < 5; ++i) { newFuncArgs.emplace_back(CI->getArgOperand(i)); } // new alignment for new type newFuncArgs.emplace_back(Op->GetI32Const(DL.getTypeAllocSize(ToTy))); CallInst *newCI = CIBuilder.CreateCall(newFunction, newFuncArgs); for (auto CIUser = CI->user_begin(), CIEnd = CI->user_end(); CIUser != CIEnd;) { User *UserEV = *(CIUser++); if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(UserEV)) { IRBuilder<> EVBuilder(EV); ArrayRef<unsigned> Indices = EV->getIndices(); DXASSERT(Indices.size() == 1, "Otherwise we have wrong extract value."); Value *newEV = EVBuilder.CreateExtractValue(newCI, Indices); Value *newTruncV = nullptr; if (4 == Indices[0]) { // Don't truncate status newTruncV = newEV; } else if (FromTy->isHalfTy()) { newTruncV = EVBuilder.CreateFPTrunc(newEV, FromTy); } else if (FromTy->isIntegerTy()) { newTruncV = EVBuilder.CreateTrunc(newEV, FromTy); } else { DXASSERT(false, "unexpected type conversion"); } EV->replaceAllUsesWith(newTruncV); EV->eraseFromParent(); } } CI->eraseFromParent(); } } F->eraseFromParent(); } void ReplaceMinPrecisionRawBufferStoreByType( Function *F, Type *FromTy, Type *ToTy, OP *Op, std::unordered_map<CallInst *, Type *> &HandleToResTypeMap, DxilTypeSystem &typeSys, const DataLayout &DL) { Function *newFunction = Op->GetOpFunc(DXIL::OpCode::RawBufferStore, ToTy); // for each function // add argument 4-7 to its upconverted values // replace function call for (auto FuncUser = F->user_begin(), FuncEnd = F->user_end(); FuncUser != FuncEnd;) { CallInst *CI = dyn_cast<CallInst>(*(FuncUser++)); DXASSERT(CI, "function user must be a call instruction."); IRBuilder<> CIBuilder(CI); SmallVector<Value *, 9> Args; for (unsigned i = 0; i < 4; ++i) { Args.emplace_back(CI->getArgOperand(i)); } // values to store should be converted to its higher precision types if (FromTy->isHalfTy()) { for (unsigned i = 4; i < 8; ++i) { Value *NewV = CIBuilder.CreateFPExt(CI->getArgOperand(i), ToTy); Args.emplace_back(NewV); } } else if (FromTy->isIntegerTy()) { // This case only applies to typed buffer since Store operation of byte // address buffer for min precision is handled by implicit conversion on // intrinsic call. Since we are extending integer, we have to know if we // should sign ext or zero ext. 
We can determine this by iterating over the // struct type and checking each element's size and its comp type in the // type annotation. CallInst *handleCI = dyn_cast<CallInst>( CI->getArgOperand(DxilInst_RawBufferStore::arg_uav)); DXASSERT(handleCI, "otherwise handle was not an argument to buffer store."); auto resTyIt = HandleToResTypeMap.find(handleCI); DXASSERT(resTyIt != HandleToResTypeMap.end(), "otherwise the handle for this buffer store lost its resource type"); StructType *STy = dyn_cast<StructType>(resTyIt->second); STy = cast<StructType>(STy->getElementType(0)); DxilStructAnnotation *SAnnot = typeSys.GetStructAnnotation(STy); ConstantInt *offsetInt = dyn_cast<ConstantInt>( CI->getArgOperand(DxilInst_RawBufferStore::arg_elementOffset)); unsigned offset = offsetInt->getSExtValue(); unsigned currentOffset = 0; for (DxilStructTypeIterator iter = begin(STy, SAnnot), ItEnd = end(STy, SAnnot); iter != ItEnd; ++iter) { std::pair<Type *, DxilFieldAnnotation *> pair = *iter; currentOffset += DL.getTypeAllocSize(pair.first); if (currentOffset > offset) { if (pair.second->GetCompType().IsUIntTy()) { for (unsigned i = 4; i < 8; ++i) { Value *NewV = CIBuilder.CreateZExt(CI->getArgOperand(i), ToTy); Args.emplace_back(NewV); } break; } else if (pair.second->GetCompType().IsIntTy()) { for (unsigned i = 4; i < 8; ++i) { Value *NewV = CIBuilder.CreateSExt(CI->getArgOperand(i), ToTy); Args.emplace_back(NewV); } break; } else { DXASSERT(false, "Invalid comp type"); } } } } // mask Args.emplace_back(CI->getArgOperand(8)); // alignment Args.emplace_back(CIBuilder.getInt32(DL.getTypeAllocSize(ToTy))); CIBuilder.CreateCall(newFunction, Args); CI->eraseFromParent(); } } } // namespace void DxilGenerationPass::TranslateMinPrecisionRawBuffer( DxilModule &DM, std::unordered_map<CallInst *, Type *> &HandleToResTypeMap) { hlsl::OP *hlslOP = DM.GetOP(); LLVMContext &Ctx = DM.GetCtx(); Type *I32Ty = Type::getInt32Ty(Ctx); Type *I16Ty = Type::getInt16Ty(Ctx); Type *F32Ty = Type::getFloatTy(Ctx); Type *F16Ty = Type::getHalfTy(Ctx); const DataLayout &DL = DM.GetModule()->getDataLayout(); DxilTypeSystem &typeSys = DM.GetTypeSystem(); SmallVector<Function *, 2> rawBufLoads; for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::RawBufferLoad)) { Function *F = it.second; if (!F) continue; rawBufLoads.emplace_back(F); } for (Function *F : rawBufLoads) { StructType *RetTy = cast<StructType>(F->getReturnType()); Type *EltTy = RetTy->getElementType(0); if (EltTy->isHalfTy()) { ReplaceMinPrecisionRawBufferLoadByType(F, F16Ty, F32Ty, hlslOP, DL); } else if (EltTy == I16Ty) { ReplaceMinPrecisionRawBufferLoadByType(F, I16Ty, I32Ty, hlslOP, DL); } } SmallVector<Function *, 2> rawBufStores; for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::RawBufferStore)) { Function *F = it.second; if (!F) continue; rawBufStores.emplace_back(F); } for (Function *F : rawBufStores) { Type *EltTy = F->getFunctionType()->getParamType(DxilInst_RawBufferStore::arg_value0); if (EltTy->isHalfTy()) { ReplaceMinPrecisionRawBufferStoreByType(F, F16Ty, F32Ty, hlslOP, HandleToResTypeMap, typeSys, DL); } else if (EltTy == I16Ty) { ReplaceMinPrecisionRawBufferStoreByType(F, I16Ty, I32Ty, hlslOP, HandleToResTypeMap, typeSys, DL); } } } char DxilGenerationPass::ID = 0; ModulePass *llvm::createDxilGenerationPass( bool NotOptimized, hlsl::HLSLExtensionsCodegenHelper *extensionsHelper) { DxilGenerationPass *dxilPass = new DxilGenerationPass(NotOptimized); dxilPass->SetExtensionsHelper(extensionsHelper); return dxilPass; } INITIALIZE_PASS(DxilGenerationPass, "dxilgen", "HLSL DXIL Generation", false,
false)
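// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original file): the
// min-precision raw buffer rewrite above widens 16-bit loads to 32-bit and
// truncates the extracted values afterwards. Schematically, with overload
// names abbreviated:
//
//   %r = call %ResRet.f16 @dx.op.rawBufferLoad.f16(i32 %op, %h, %i, %o, %m)
//   %v = extractvalue %ResRet.f16 %r, 0
//
// becomes
//
//   %r = call %ResRet.f32 @dx.op.rawBufferLoad.f32(i32 %op, %h, %i, %o, %m,
//                                                  i32 4) ; new alignment arg
//   %e = extractvalue %ResRet.f32 %r, 0
//   %v = fptrunc float %e to half
//
// Element 4 of the ResRet struct (the status value) is left untruncated, and
// integer overloads use trunc on load and sext/zext on store, with the
// signedness recovered from the struct's type annotation.
// ---------------------------------------------------------------------------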
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLMetadataPasses.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLMetadataPasses.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLModule.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" using namespace llvm; using namespace hlsl; namespace { class HLEmitMetadata : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit HLEmitMetadata() : ModulePass(ID) {} StringRef getPassName() const override { return "HLSL High-Level Metadata Emit"; } bool runOnModule(Module &M) override { if (M.HasHLModule()) { HLModule::ClearHLMetadata(M); M.GetHLModule().EmitHLMetadata(); return true; } return false; } }; } // namespace char HLEmitMetadata::ID = 0; ModulePass *llvm::createHLEmitMetadataPass() { return new HLEmitMetadata(); } INITIALIZE_PASS(HLEmitMetadata, "hlsl-hlemit", "HLSL High-Level Metadata Emit", false, false) /////////////////////////////////////////////////////////////////////////////// namespace { class HLEnsureMetadata : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit HLEnsureMetadata() : ModulePass(ID) {} StringRef getPassName() const override { return "HLSL High-Level Metadata Ensure"; } bool runOnModule(Module &M) override { if (!M.HasHLModule()) { M.GetOrCreateHLModule(); return true; } return false; } }; } // namespace char HLEnsureMetadata::ID = 0; ModulePass *llvm::createHLEnsureMetadataPass() { return new HLEnsureMetadata(); } INITIALIZE_PASS(HLEnsureMetadata, "hlsl-hlensure", "HLSL High-Level Metadata Ensure", false, false)
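// ---------------------------------------------------------------------------
// Editor's note (illustrative usage sketch, not part of the original file):
// both passes are typically scheduled through the legacy pass manager by the
// surrounding compiler driver, roughly:
//
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createHLEnsureMetadataPass()); // create HLModule if absent
//   PM.add(llvm::createHLEmitMetadataPass());   // serialize it to metadata
//   PM.run(M);
// ---------------------------------------------------------------------------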
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilNoOptLegalize.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilNoOptLegalize.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/DxilNoops.h" #include "dxc/Support/Global.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" using namespace llvm; class DxilNoOptLegalize : public ModulePass { SmallVector<Value *, 16> Worklist; public: static char ID; DxilNoOptLegalize() : ModulePass(ID) { initializeDxilNoOptLegalizePass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; bool RemoveStoreUndefsFromPtr(Value *V); bool RemoveStoreUndefs(Module &M); }; char DxilNoOptLegalize::ID; bool DxilNoOptLegalize::RemoveStoreUndefsFromPtr(Value *Ptr) { bool Changed = false; Worklist.clear(); Worklist.push_back(Ptr); while (Worklist.size()) { Value *V = Worklist.back(); Worklist.pop_back(); if (isa<AllocaInst>(V) || isa<GlobalVariable>(V) || isa<GEPOperator>(V)) { for (User *U : V->users()) Worklist.push_back(U); } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) { if (isa<UndefValue>(Store->getValueOperand())) { Store->eraseFromParent(); Changed = true; } } } return Changed; } bool DxilNoOptLegalize::RemoveStoreUndefs(Module &M) { bool Changed = false; for (GlobalVariable &GV : M.globals()) { Changed |= RemoveStoreUndefsFromPtr(&GV); } for (Function &F : M) { if (F.empty()) continue; BasicBlock &Entry = F.getEntryBlock(); for (Instruction &I : Entry) { if (isa<AllocaInst>(&I)) Changed |= RemoveStoreUndefsFromPtr(&I); } } return Changed; } bool DxilNoOptLegalize::runOnModule(Module &M) { bool Changed = false; Changed |= RemoveStoreUndefs(M); return Changed; } ModulePass *llvm::createDxilNoOptLegalizePass() { return new DxilNoOptLegalize(); } INITIALIZE_PASS(DxilNoOptLegalize, "dxil-o0-legalize", "DXIL No-Opt Legalize", false, false) class DxilNoOptSimplifyInstructions : public ModulePass { SmallVector<Value *, 16> Worklist; public: static char ID; DxilNoOptSimplifyInstructions() : ModulePass(ID) { initializeDxilNoOptSimplifyInstructionsPass( *PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DxilValueCache>(); } bool runOnModule(Module &M) override { bool Changed = false; DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); for (Function &F : M) { for (BasicBlock &BB : F) { for (auto it = BB.begin(), end = BB.end(); it != end;) { Instruction *I = &*(it++); if (I->getOpcode() == Instruction::Select) { if (hlsl::IsPreserve(I)) continue; if (Value *C = DVC->GetValue(I)) { I->replaceAllUsesWith(C); I->eraseFromParent(); Changed = true; } } else if (PHINode *Phi = dyn_cast<PHINode>(I)) { // Replace all simple phi values (such as those inserted by lcssa) // with the value itself. This avoids phis in places they are not // expected because the normal simplify passes will clean them up. 
if (Value *NewPhi = llvm::SimplifyInstruction(Phi, M.getDataLayout())) { Phi->replaceAllUsesWith(NewPhi); Phi->eraseFromParent(); Changed = true; } } } } } return Changed; } }; char DxilNoOptSimplifyInstructions::ID; ModulePass *llvm::createDxilNoOptSimplifyInstructionsPass() { return new DxilNoOptSimplifyInstructions(); } INITIALIZE_PASS(DxilNoOptSimplifyInstructions, "dxil-o0-simplify-inst", "DXIL No-Opt Simplify Inst", false, false)
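// ---------------------------------------------------------------------------
// Editor's note (illustrative example, not part of the original file): at
// -Od, DxilNoOptLegalize deletes stores of undef reached from an alloca,
// global, or GEP chain, e.g.
//
//   %a = alloca [4 x float]
//   %p = getelementptr [4 x float], [4 x float]* %a, i32 0, i32 1
//   store float undef, float* %p   ; erased by RemoveStoreUndefsFromPtr
//
// DxilNoOptSimplifyInstructions then folds selects whose value the
// DxilValueCache can prove constant and removes trivial (e.g. LCSSA) phis
// via llvm::SimplifyInstruction.
// ---------------------------------------------------------------------------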
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPreparePasses.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilPreparePasses.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Passes to prepare DxilModule. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilEntryProps.h" #include "dxc/DXIL/DxilFunctionProps.h" #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/DxilPoisonValues.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "dxc/Support/Global.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Local.h" #include <memory> #include <unordered_set> using namespace llvm; using namespace hlsl; namespace { class InvalidateUndefResources : public ModulePass { public: static char ID; explicit InvalidateUndefResources() : ModulePass(ID) { initializeScalarizerPass(*PassRegistry::getPassRegistry()); } StringRef getPassName() const override { return "Invalidate undef resources"; } bool runOnModule(Module &M) override; }; } // namespace char InvalidateUndefResources::ID = 0; ModulePass *llvm::createInvalidateUndefResourcesPass() { return new InvalidateUndefResources(); } INITIALIZE_PASS(InvalidateUndefResources, "invalidate-undef-resource", "Invalidate undef resources", false, false) bool InvalidateUndefResources::runOnModule(Module &M) { // Undef resources typically indicate uninitialized locals being used // in some code path, which we should catch and report. However, some // code patterns in large shaders cause dead undef resources to appear // momentarily, which is not an error. We must wait until cleanup passes // have run to know whether we must produce an error. // However, we can't leave the undef values in because they could be // eliminated, such as by reading from resources seen in a code path that // was not taken. We avoid the problem by replacing undef values with // another invalid value that we can identify later.
for (auto &F : M.functions()) { if (GetHLOpcodeGroupByName(&F) == HLOpcodeGroup::HLCreateHandle) { Type *ResTy = F.getFunctionType()->getParamType( HLOperandIndex::kCreateHandleResourceOpIdx); UndefValue *UndefRes = UndefValue::get(ResTy); if (!UndefRes->use_empty()) { Constant *InvalidRes = ConstantAggregateZero::get(ResTy); UndefRes->replaceAllUsesWith(InvalidRes); } } } return false; } /////////////////////////////////////////////////////////////////////////////// namespace { class SimplifyInst : public FunctionPass { public: static char ID; SimplifyInst() : FunctionPass(ID) { initializeScalarizerPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; private: }; } // namespace char SimplifyInst::ID = 0; FunctionPass *llvm::createSimplifyInstPass() { return new SimplifyInst(); } INITIALIZE_PASS(SimplifyInst, "simplify-inst", "Simplify Instructions", false, false) bool SimplifyInst::runOnFunction(Function &F) { for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) { BasicBlock *BB = BBI; llvm::SimplifyInstructionsInBlock(BB, nullptr); } return true; } /////////////////////////////////////////////////////////////////////////////// namespace { class DxilDeadFunctionElimination : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilDeadFunctionElimination() : ModulePass(ID) {} StringRef getPassName() const override { return "Remove all unused function except entry from DxilModule"; } bool runOnModule(Module &M) override { if (M.HasDxilModule()) { DxilModule &DM = M.GetDxilModule(); bool IsLib = DM.GetShaderModel()->IsLib(); // Remove unused functions except entry and patch constant func. // For library profile, only remove unused external functions. Function *EntryFunc = DM.GetEntryFunction(); Function *PatchConstantFunc = DM.GetPatchConstantFunction(); return dxilutil::RemoveUnusedFunctions(M, EntryFunc, PatchConstantFunc, IsLib); } return false; } }; } // namespace char DxilDeadFunctionElimination::ID = 0; ModulePass *llvm::createDxilDeadFunctionEliminationPass() { return new DxilDeadFunctionElimination(); } INITIALIZE_PASS(DxilDeadFunctionElimination, "dxil-dfe", "Remove all unused function except entry from DxilModule", false, false) /////////////////////////////////////////////////////////////////////////////// bool CleanupSharedMemoryAddrSpaceCast(Module &M); namespace { static void TransferEntryFunctionAttributes(Function *F, Function *NewFunc) { // Keep necessary function attributes AttributeSet attributeSet = F->getAttributes(); StringRef attrKind, attrValue; if (attributeSet.hasAttribute(AttributeSet::FunctionIndex, DXIL::kFP32DenormKindString)) { Attribute attribute = attributeSet.getAttribute( AttributeSet::FunctionIndex, DXIL::kFP32DenormKindString); DXASSERT(attribute.isStringAttribute(), "otherwise we have wrong fp-denorm-mode attribute."); attrKind = attribute.getKindAsString(); attrValue = attribute.getValueAsString(); } bool helperLane = attributeSet.hasAttribute( AttributeSet::FunctionIndex, DXIL::kWaveOpsIncludeHelperLanesString); if (F == NewFunc) { NewFunc->removeAttributes(AttributeSet::FunctionIndex, attributeSet); } if (!attrKind.empty() && !attrValue.empty()) NewFunc->addFnAttr(attrKind, attrValue); if (helperLane) NewFunc->addFnAttr(DXIL::kWaveOpsIncludeHelperLanesString); } // If this returns non-null, the old function F has been stripped and can be // deleted. 
static Function *StripFunctionParameter( Function *F, DxilModule &DM, DenseMap<const Function *, DISubprogram *> &FunctionDIs) { if (F->arg_empty() && F->getReturnType()->isVoidTy()) { // This will strip non-entry function attributes TransferEntryFunctionAttributes(F, F); return nullptr; } Module &M = *DM.GetModule(); Type *VoidTy = Type::getVoidTy(M.getContext()); FunctionType *FT = FunctionType::get(VoidTy, false); for (auto &arg : F->args()) { if (!arg.user_empty()) return nullptr; DbgDeclareInst *DDI = llvm::FindAllocaDbgDeclare(&arg); if (DDI) { DDI->eraseFromParent(); } } Function *NewFunc = Function::Create(FT, F->getLinkage()); M.getFunctionList().insert(F, NewFunc); // Splice the body of the old function right into the new function. NewFunc->getBasicBlockList().splice(NewFunc->begin(), F->getBasicBlockList()); TransferEntryFunctionAttributes(F, NewFunc); // Patch the pointer to LLVM function in debug info descriptor. auto DI = FunctionDIs.find(F); if (DI != FunctionDIs.end()) { DISubprogram *SP = DI->second; SP->replaceFunction(NewFunc); // Ensure the map is updated so it can be reused on subsequent argument // promotions of the same function. FunctionDIs.erase(DI); FunctionDIs[NewFunc] = SP; } NewFunc->takeName(F); if (DM.HasDxilFunctionProps(F)) { DM.ReplaceDxilEntryProps(F, NewFunc); } DM.GetTypeSystem().EraseFunctionAnnotation(F); DM.GetTypeSystem().AddFunctionAnnotation(NewFunc); return NewFunc; } void CheckInBoundForTGSM(GlobalVariable &GV, const DataLayout &DL) { for (User *U : GV.users()) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { bool allImmIndex = true; for (auto Idx = GEP->idx_begin(), E = GEP->idx_end(); Idx != E; Idx++) { if (!isa<ConstantInt>(Idx)) { allImmIndex = false; break; } } if (!allImmIndex) GEP->setIsInBounds(false); else { Value *Ptr = GEP->getPointerOperand(); unsigned size = DL.getTypeAllocSize(Ptr->getType()->getPointerElementType()); unsigned valSize = DL.getTypeAllocSize(GEP->getType()->getPointerElementType()); SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end()); unsigned offset = DL.getIndexedOffset(GEP->getPointerOperandType(), Indices); if ((offset + valSize) > size) GEP->setIsInBounds(false); } } } } static bool GetUnsignedVal(Value *V, uint32_t *pValue) { ConstantInt *CI = dyn_cast<ConstantInt>(V); if (!CI) return false; uint64_t u = CI->getZExtValue(); if (u > UINT32_MAX) return false; *pValue = (uint32_t)u; return true; } static void MarkUsedSignatureElements(Function *F, DxilModule &DM) { DXASSERT_NOMSG(F != nullptr); // For every loadInput/storeOutput, update the corresponding ReadWriteMask. 
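// For example (illustrative values): loadInput(sigId=0, row=%r, col=2) sets
// bit (1 << 2) in element 0's usage mask, and, because the row index %r is
// not a constant, also sets the same bit in the element's dynamic-index
// component mask.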
// F is a pointer to a Function instance for (llvm::inst_iterator I = llvm::inst_begin(F), E = llvm::inst_end(F); I != E; ++I) { DxilInst_LoadInput LI(&*I); DxilInst_StoreOutput SO(&*I); DxilInst_LoadPatchConstant LPC(&*I); DxilInst_StorePatchConstant SPC(&*I); DxilInst_StoreVertexOutput SVO(&*I); DxilInst_StorePrimitiveOutput SPO(&*I); DxilSignature *pSig; uint32_t col, row, sigId; bool bDynIdx = false; if (LI) { if (!GetUnsignedVal(LI.get_inputSigId(), &sigId)) continue; if (!GetUnsignedVal(LI.get_colIndex(), &col)) continue; if (!GetUnsignedVal(LI.get_rowIndex(), &row)) bDynIdx = true; pSig = &DM.GetInputSignature(); } else if (SO) { if (!GetUnsignedVal(SO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SO.get_colIndex(), &col)) continue; if (!GetUnsignedVal(SO.get_rowIndex(), &row)) bDynIdx = true; pSig = &DM.GetOutputSignature(); } else if (SPC) { if (!GetUnsignedVal(SPC.get_outputSigID(), &sigId)) continue; if (!GetUnsignedVal(SPC.get_col(), &col)) continue; if (!GetUnsignedVal(SPC.get_row(), &row)) bDynIdx = true; pSig = &DM.GetPatchConstOrPrimSignature(); } else if (LPC) { if (!GetUnsignedVal(LPC.get_inputSigId(), &sigId)) continue; if (!GetUnsignedVal(LPC.get_col(), &col)) continue; if (!GetUnsignedVal(LPC.get_row(), &row)) bDynIdx = true; pSig = &DM.GetPatchConstOrPrimSignature(); } else if (SVO) { if (!GetUnsignedVal(SVO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SVO.get_colIndex(), &col)) continue; if (!GetUnsignedVal(SVO.get_rowIndex(), &row)) bDynIdx = true; pSig = &DM.GetOutputSignature(); } else if (SPO) { if (!GetUnsignedVal(SPO.get_outputSigId(), &sigId)) continue; if (!GetUnsignedVal(SPO.get_colIndex(), &col)) continue; if (!GetUnsignedVal(SPO.get_rowIndex(), &row)) bDynIdx = true; pSig = &DM.GetPatchConstOrPrimSignature(); } else { continue; } // Consider being more fine-grained about masks. // We report sometimes-read on input as always-read. auto &El = pSig->GetElement(sigId); unsigned UsageMask = El.GetUsageMask(); unsigned colBit = 1 << col; if (!(colBit & UsageMask)) { El.SetUsageMask(UsageMask | colBit); } if (bDynIdx && (El.GetDynIdxCompMask() & colBit) == 0) { El.SetDynIdxCompMask(El.GetDynIdxCompMask() | colBit); } } } class DxilFinalizeModule : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilFinalizeModule() : ModulePass(ID) {} StringRef getPassName() const override { return "HLSL DXIL Finalize Module"; } void patchInstructionMetadata(Module &M, DenseSet<unsigned> &IllegalMDSet) { for (auto &F : M.getFunctionList()) { for (auto &BB : F) { for (auto &I : BB) { if (I.hasMetadataOtherThanDebugLoc()) { SmallVector<std::pair<unsigned, MDNode *>, 2> MDs; I.getAllMetadataOtherThanDebugLoc(MDs); for (auto &MD : MDs) { unsigned kind = MD.first; // Remove illegal metadata. 
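              // (Which metadata kinds are illegal depends on the validator
              // version; IllegalMDSet is populated in runOnModule below.)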
if (IllegalMDSet.count(kind)) I.setMetadata(kind, nullptr); } } } } } } void RemoveAnnotateHandle(hlsl::OP *hlslOP) { for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::AnnotateHandle)) { Function *F = it.second; if (!F) continue; for (auto uit = F->user_begin(); uit != F->user_end();) { CallInst *CI = cast<CallInst>(*(uit++)); DxilInst_AnnotateHandle annoteHdl(CI); Value *hdl = annoteHdl.get_res(); CI->replaceAllUsesWith(hdl); CI->eraseFromParent(); } } } /////////////////////////////////////////////////// // IsHelperLane() lowering for SM < 6.6 // Identify pattern icmp_eq(0, dx.coverage()) bool IsCmpZOfCoverage(Value *V, hlsl::OP *hlslOP) { if (ICmpInst *IC = dyn_cast<ICmpInst>(V)) { if (IC->getPredicate() == ICmpInst::ICMP_EQ) { Value *V0 = IC->getOperand(0); Value *V1 = IC->getOperand(1); if (!isa<ConstantInt>(V0)) std::swap(V0, V1); if (ConstantInt *C = dyn_cast<ConstantInt>(V0)) { if (CallInst *CI = dyn_cast<CallInst>(V1)) { // compare dx.op.coverage with zero if (C->isZero() && hlslOP->IsDxilOpFuncCallInst(CI, DXIL::OpCode::Coverage)) { return true; } } } } } return false; } // Identify init as use in entry block that either: // - non-PS: store i32 0 // - PS: store zext(icmp_eq(0, dx.coverage())) bool IsInitOfIsHelperGV(User *U, hlsl::OP *hlslOP) { if (StoreInst *SI = dyn_cast<StoreInst>(U)) { BasicBlock *BB = SI->getParent(); if (BB == &BB->getParent()->getEntryBlock()) { Value *V = SI->getValueOperand(); if (ConstantInt *C = dyn_cast<ConstantInt>(V)) { if (C->isZero()) { return true; } } else if (ZExtInst *ZEI = dyn_cast<ZExtInst>(V)) { if (IsCmpZOfCoverage(ZEI->getOperand(0), hlslOP)) { return true; } } } } return false; } void RemoveFnIfIsHelperInit(User *U, hlsl::OP *hlslOP, SmallSetVector<Function *, 4> &psEntries) { if (Instruction *I = dyn_cast<Instruction>(U)) { // Early out: only check if in function still in set Function *F = I->getParent()->getParent(); if (!psEntries.count(F)) return; if (IsInitOfIsHelperGV(I, hlslOP)) { psEntries.remove(F); } } } // Init IsHelper GV to zext(!dx.op.coverage()) in PS entry points void InitIsHelperGV(Module &M) { GlobalVariable *GV = M.getGlobalVariable(DXIL::kDxIsHelperGlobalName, /*AllowLocal*/ true); if (!GV) return; DxilModule &DM = M.GetDxilModule(); hlsl::OP *hlslOP = DM.GetOP(); const ShaderModel *pSM = DM.GetShaderModel(); // If PS, and GV is ExternalLinkage, change to InternalLinkage // This can happen after link to final PS. 
if (pSM->IsPS() && GV->getLinkage() == GlobalValue::ExternalLinkage) { GV->setLinkage(GlobalValue::InternalLinkage); } // add PS entry points to set SmallSetVector<Function *, 4> psEntries; if (pSM->IsPS()) { psEntries.insert(DM.GetEntryFunction()); } else if (pSM->IsLib()) { for (auto &F : M.functions()) { if (DM.HasDxilEntryProps(&F)) { if (DM.GetDxilEntryProps(&F).props.IsPS()) { psEntries.insert(&F); } } } } // iterate users of GV to skip entries that already init GV for (auto &U : GV->uses()) { RemoveFnIfIsHelperInit(U.getUser(), DM.GetOP(), psEntries); } // store zext(!dx.op.coverage()) Type *I32Ty = Type::getInt32Ty(hlslOP->GetCtx()); Constant *C0 = hlslOP->GetI32Const(0); Constant *OpArg = hlslOP->GetI32Const((int)DXIL::OpCode::Coverage); Function *CoverageF = nullptr; for (auto *F : psEntries) { if (!CoverageF) CoverageF = hlslOP->GetOpFunc(DXIL::OpCode::Coverage, I32Ty); IRBuilder<> Builder(F->getEntryBlock().getFirstInsertionPt()); Value *V = Builder.CreateCall(CoverageF, {OpArg}); V = Builder.CreateICmpEQ(C0, V); V = Builder.CreateZExt(V, I32Ty); Builder.CreateStore(V, GV); } } GlobalVariable *GetIsHelperGV(Module &M) { return M.getGlobalVariable(DXIL::kDxIsHelperGlobalName, /*AllowLocal*/ true); } GlobalVariable *GetOrCreateIsHelperGV(Module &M, hlsl::OP *hlslOP) { GlobalVariable *GV = GetIsHelperGV(M); if (GV) return GV; DxilModule &DM = M.GetDxilModule(); const ShaderModel *pSM = DM.GetShaderModel(); GV = new GlobalVariable(M, IntegerType::get(M.getContext(), 32), /*constant*/ false, pSM->IsLib() ? GlobalValue::ExternalLinkage : GlobalValue::InternalLinkage, /*Initializer*/ hlslOP->GetI32Const(0), DXIL::kDxIsHelperGlobalName); return GV; } // Replace IsHelperLane() with false (for non-lib, non-PS SM) void ReplaceIsHelperWithConstFalse(hlsl::OP *hlslOP) { Constant *False = hlslOP->GetI1Const(0); bool bDone = false; while (!bDone) { bDone = true; for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::IsHelperLane)) { Function *F = it.second; if (!F) continue; for (auto uit = F->user_begin(); uit != F->user_end();) { CallInst *CI = dyn_cast<CallInst>(*(uit++)); CI->replaceAllUsesWith(False); CI->eraseFromParent(); } hlslOP->RemoveFunction(F); F->eraseFromParent(); bDone = false; break; } } } void ConvertIsHelperToLoadGV(hlsl::OP *hlslOP) { GlobalVariable *GV = nullptr; Type *I1Ty = Type::getInt1Ty(hlslOP->GetCtx()); bool bDone = false; while (!bDone) { bDone = true; for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::IsHelperLane)) { Function *F = it.second; if (!F) continue; for (auto uit = F->user_begin(); uit != F->user_end();) { CallInst *CI = cast<CallInst>(*(uit++)); if (!GV) GV = GetOrCreateIsHelperGV(*F->getParent(), hlslOP); IRBuilder<> Builder(CI); Value *V = Builder.CreateLoad(GV); V = Builder.CreateTrunc(V, I1Ty); CI->replaceAllUsesWith(V); CI->eraseFromParent(); } hlslOP->RemoveFunction(F); F->eraseFromParent(); bDone = false; break; } } } void ConvertDiscardToStoreGV(hlsl::OP *hlslOP) { GlobalVariable *GV = nullptr; Type *I32Ty = Type::getInt32Ty(hlslOP->GetCtx()); for (auto it : hlslOP->GetOpFuncList(DXIL::OpCode::Discard)) { Function *F = it.second; if (!F) continue; for (auto uit = F->user_begin(); uit != F->user_end();) { CallInst *CI = cast<CallInst>(*(uit++)); if (!GV) GV = GetIsHelperGV(*F->getParent()); // If we don't already have a global for this, // we didn't have any IsHelper() calls, so no need to add one now. 
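      // Roughly (a sketch): after this pass, discard(%cond) is followed by
      //   store i32 (zext i1 %cond), i32* @dx.ishelper
      // so a later IsHelperLane() load observes lanes demoted by discard.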
      if (!GV)
        return;
      IRBuilder<> Builder(CI);
      Value *Cond = Builder.CreateZExt(
          DxilInst_Discard(CI).get_condition(), I32Ty);
      Builder.CreateStore(Cond, GV);
    }
  }
}

///////////////////////////////////////////////////

void patchDxil_1_6(Module &M, hlsl::OP *hlslOP, unsigned ValMajor,
                   unsigned ValMinor) {
  RemoveAnnotateHandle(hlslOP);
  // Convert IsHelperLane() on down-level targets
  const ShaderModel *pSM = M.GetDxilModule().GetShaderModel();
  if (pSM->IsLib() || pSM->IsPS()) {
    ConvertIsHelperToLoadGV(hlslOP);
    ConvertDiscardToStoreGV(hlslOP);
    InitIsHelperGV(M);
    // Set linkage of dx.ishelper to internal for validator version < 1.6.
    // This means the IsHelperLane() fallback code will not return the correct
    // result in an exported function linked to a PS in another library in
    // that case, but it won't pass validation otherwise.
    if (pSM->IsLib() && DXIL::CompareVersions(ValMajor, ValMinor, 1, 6) < 0) {
      if (GlobalVariable *GV = GetIsHelperGV(M)) {
        GV->setLinkage(GlobalValue::InternalLinkage);
      }
    }
  } else {
    ReplaceIsHelperWithConstFalse(hlslOP);
  }
}

void convertQuadVote(Module &M, hlsl::OP *hlslOP) {
  for (auto FnIt : hlslOP->GetOpFuncList(DXIL::OpCode::QuadVote)) {
    Function *F = FnIt.second;
    if (!F)
      continue;
    for (auto UserIt = F->user_begin(); UserIt != F->user_end();) {
      CallInst *CI = cast<CallInst>(*(UserIt++));
      IRBuilder<> B(CI);
      DXASSERT_NOMSG(CI->getOperand(1)->getType() ==
                     Type::getInt1Ty(M.getContext()));
      Type *i32Ty = Type::getInt32Ty(M.getContext());
      Value *Cond = B.CreateSExt(CI->getOperand(1), i32Ty);
      Function *QuadOpFn = hlslOP->GetOpFunc(DXIL::OpCode::QuadOp, i32Ty);
      const std::string &OpName = hlslOP->GetOpCodeName(DXIL::OpCode::QuadOp);
      Value *refArgs[] = {hlslOP->GetU32Const((unsigned)DXIL::OpCode::QuadOp),
                          Cond, nullptr};
      refArgs[2] = hlslOP->GetI8Const((unsigned)DXIL::QuadOpKind::ReadAcrossX);
      Value *X = B.CreateCall(QuadOpFn, refArgs, OpName);
      refArgs[2] = hlslOP->GetI8Const((unsigned)DXIL::QuadOpKind::ReadAcrossY);
      Value *Y = B.CreateCall(QuadOpFn, refArgs, OpName);
      refArgs[2] =
          hlslOP->GetI8Const((unsigned)DXIL::QuadOpKind::ReadAcrossDiagonal);
      Value *Z = B.CreateCall(QuadOpFn, refArgs, OpName);
      Value *Result = nullptr;
      uint64_t OpKind = cast<ConstantInt>(CI->getOperand(2))->getZExtValue();
      if (OpKind == (uint64_t)DXIL::QuadVoteOpKind::All) {
        Value *XY = B.CreateAnd(X, Y);
        Value *XYZ = B.CreateAnd(XY, Z);
        Result = B.CreateAnd(XYZ, Cond);
      } else {
        DXASSERT_NOMSG(OpKind == (uint64_t)DXIL::QuadVoteOpKind::Any);
        Value *XY = B.CreateOr(X, Y);
        Value *XYZ = B.CreateOr(XY, Z);
        Result = B.CreateOr(XYZ, Cond);
      }
      Value *Res = B.CreateTrunc(Result, Type::getInt1Ty(M.getContext()));
      CI->replaceAllUsesWith(Res);
      CI->eraseFromParent();
    }
  }
}

// Replace llvm.lifetime.start/.end intrinsics with undef or zeroinitializer
// stores (for earlier validator versions) unless the pointer is a global
// that has an initializer.
// This works around losing scoping information in earlier shader models
// that do not support the intrinsics natively.
void patchLifetimeIntrinsics(Module &M, unsigned ValMajor, unsigned ValMinor,
                             bool forceZeroStoreLifetimes) {
  // Get the declarations. This may introduce them if there were none before.
  Value *StartDecl = Intrinsic::getDeclaration(&M, Intrinsic::lifetime_start);
  Value *EndDecl = Intrinsic::getDeclaration(&M, Intrinsic::lifetime_end);

  // Collect all calls to both intrinsics.
  std::vector<CallInst *> intrinsicCalls;
  for (Use &U : StartDecl->uses()) {
    // All users must be call instructions.
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    DXASSERT(CI, "Expected user of lifetime.start intrinsic to be a CallInst");
    intrinsicCalls.push_back(CI);
  }
  for (Use &U : EndDecl->uses()) {
    // All users must be call instructions.
    CallInst *CI = dyn_cast<CallInst>(U.getUser());
    DXASSERT(CI, "Expected user of lifetime.end intrinsic to be a CallInst");
    intrinsicCalls.push_back(CI);
  }

  // Replace each intrinsic with an undef store.
  for (CallInst *CI : intrinsicCalls) {
    // Find the corresponding pointer (bitcast from alloca, global value, an
    // argument, ...).
    Value *voidPtr = CI->getArgOperand(1);
    DXASSERT(voidPtr->getType()->isPointerTy() &&
                 voidPtr->getType()->getPointerElementType()->isIntegerTy(8),
             "Expected operand of lifetime intrinsic to be of type i8*");

    Value *ptr = nullptr;
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(voidPtr)) {
      // This can happen if a local variable/array is promoted to a constant
      // global. In this case we must not introduce a store, since that would
      // overwrite the constant values in the initializer. Thus, we simply
      // remove the intrinsic.
      DXASSERT(CE->getOpcode() == Instruction::BitCast,
               "expected operand of lifetime intrinsic to be a bitcast");
    } else {
      // Otherwise, it must be a normal bitcast.
      DXASSERT(isa<BitCastInst>(voidPtr),
               "Expected operand of lifetime intrinsic to be a bitcast");
      BitCastInst *BC = cast<BitCastInst>(voidPtr);
      ptr = BC->getOperand(0);

      // If the original pointer is a global with initializer, do not replace
      // the intrinsic with a store.
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(ptr))
        if (GV->hasInitializer() || GV->isExternallyInitialized())
          ptr = nullptr;
    }

    if (ptr) {
      // Determine the type to use when storing undef.
      DXASSERT(ptr->getType()->isPointerTy(),
               "Expected type of operand of lifetime intrinsic bitcast "
               "operand to be a pointer");
      Type *T = ptr->getType()->getPointerElementType();

      // Store undef at the location of the start/end intrinsic.
      // If we are targeting validator version < 1.6 we cannot store undef
      // since it causes a validation error. As a workaround we store 0, which
      // achieves mostly the same as storing undef but can cause overhead in
      // some situations.
      // We also allow forcing zeroinitializer stores through a flag.
      if (forceZeroStoreLifetimes || ValMajor < 1 ||
          (ValMajor == 1 && ValMinor < 6))
        IRBuilder<>(CI).CreateStore(Constant::getNullValue(T), ptr);
      else
        IRBuilder<>(CI).CreateStore(UndefValue::get(T), ptr);
    }

    // Erase the intrinsic call and, if it has no uses anymore, the bitcast as
    // well.
    DXASSERT_NOMSG(CI->use_empty());
    CI->eraseFromParent();

    // Erase the bitcast inst if it is not a ConstantExpr.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(voidPtr))
      if (BC->use_empty())
        BC->eraseFromParent();
  }

  // Erase the intrinsic declarations.
  DXASSERT_NOMSG(StartDecl->use_empty());
  DXASSERT_NOMSG(EndDecl->use_empty());
  cast<Function>(StartDecl)->eraseFromParent();
  cast<Function>(EndDecl)->eraseFromParent();
}

bool runOnModule(Module &M) override {
  // Remove all the poisoned values and emit errors if necessary.
  (void)hlsl::FinalizePoisonValues(M);

  if (M.HasDxilModule()) {
    DxilModule &DM = M.GetDxilModule();
    unsigned ValMajor = 0;
    unsigned ValMinor = 0;
    DM.GetValidatorVersion(ValMajor, ValMinor);
    unsigned DxilMajor = 0;
    unsigned DxilMinor = 0;
    DM.GetDxilVersion(DxilMajor, DxilMinor);

    DenseSet<unsigned> IllegalMDSet;
    unsigned DxilTempMDKind =
        M.getContext().getMDKindID(DxilMDHelper::kDxilTempAllocaMDName);
    IllegalMDSet.insert(DxilTempMDKind);
    // Skip validation patch for lib.
bool IsLib = DM.GetShaderModel()->IsLib(); if (!IsLib) { if (DXIL::CompareVersions(ValMajor, ValMinor, 1, 1) <= 0) { IllegalMDSet.insert(LLVMContext::MD_tbaa); IllegalMDSet.insert(LLVMContext::MD_prof); for (unsigned I = LLVMContext::MD_fpmath + 1; I <= LLVMContext::MD_dereferenceable_or_null; ++I) { IllegalMDSet.insert(I); } } } patchInstructionMetadata(M, IllegalMDSet); // Replace lifetime intrinsics if requested or necessary. const bool forceZeroStoreLifetimes = DM.GetForceZeroStoreLifetimes(); if (forceZeroStoreLifetimes || DXIL::CompareVersions(DxilMajor, DxilMinor, 1, 6) < 0) { patchLifetimeIntrinsics(M, ValMajor, ValMinor, forceZeroStoreLifetimes); } hlsl::OP *hlslOP = DM.GetOP(); // Basic down-conversions for Dxil < 1.6 if (DXIL::CompareVersions(DxilMajor, DxilMinor, 1, 6) < 0) { patchDxil_1_6(M, hlslOP, ValMajor, ValMinor); } // Convert quad vote if (DXIL::CompareVersions(DxilMajor, DxilMinor, 1, 7) < 0) { convertQuadVote(M, DM.GetOP()); } // Remove store undef output. RemoveStoreUndefOutput(M, hlslOP); if (!IsLib) { // Set used masks for signature elements MarkUsedSignatureElements(DM.GetEntryFunction(), DM); if (DM.GetShaderModel()->IsHS()) MarkUsedSignatureElements(DM.GetPatchConstantFunction(), DM); } // Adding warning for pixel shader with unassigned target if (DM.GetShaderModel()->IsPS()) { DxilSignature &sig = DM.GetOutputSignature(); for (auto &Elt : sig.GetElements()) { if (Elt->GetKind() == Semantic::Kind::Target && Elt->GetUsageMask() != Elt->GetColsAsMask()) { dxilutil::EmitWarningOnContext( M.getContext(), "Declared output " + llvm::Twine(Elt->GetName()) + llvm::Twine(Elt->GetSemanticStartIndex()) + " not fully written in shader."); } } } // Turn dx.break() conditional into global LowerDxBreak(M); RemoveUnusedStaticGlobal(M); // Remove unnecessary address space casts. CleanupSharedMemoryAddrSpaceCast(M); // Clear inbound for GEP which has none-const index. LegalizeSharedMemoryGEPInbound(M); // Strip parameters of entry function. StripEntryParameters(M, DM, IsLib); // Remove unused types from type annotations DM.RemoveUnusedTypeAnnotations(); // Update flags to reflect any changes. DM.CollectShaderFlagsForModule(); // Update Validator Version DM.UpgradeToMinValidatorVersion(); // Clear intermediate options that shouldn't be in the final DXIL DM.ClearIntermediateOptions(); // Remove unused AllocateRayQuery calls RemoveUnusedRayQuery(M); if (IsLib && DXIL::CompareVersions(ValMajor, ValMinor, 1, 4) <= 0) { // 1.4 validator requires function annotations for all functions AddFunctionAnnotationForInitializers(M, DM); } // Fix DIExpression fragments that cover whole variables LegalizeDbgFragments(M); return true; } return false; } private: void RemoveUnusedStaticGlobal(Module &M) { // Remove unused internal global. 
std::vector<GlobalVariable *> staticGVs; for (GlobalVariable &GV : M.globals()) { if (dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV)) { staticGVs.emplace_back(&GV); } } for (GlobalVariable *GV : staticGVs) { bool onlyStoreUse = true; for (User *user : GV->users()) { if (isa<StoreInst>(user)) continue; if (isa<ConstantExpr>(user) && user->user_empty()) continue; onlyStoreUse = false; break; } if (onlyStoreUse) { for (auto UserIt = GV->user_begin(); UserIt != GV->user_end();) { Value *User = *(UserIt++); if (Instruction *I = dyn_cast<Instruction>(User)) { I->eraseFromParent(); } else { ConstantExpr *CE = cast<ConstantExpr>(User); CE->dropAllReferences(); } } GV->eraseFromParent(); } } } static bool BitPieceCoversEntireVar(DIExpression *expr, DILocalVariable *var, DITypeIdentifierMap &TypeIdentifierMap) { if (expr->isBitPiece()) { DIType *ty = var->getType().resolve(TypeIdentifierMap); return expr->getBitPieceOffset() == 0 && expr->getBitPieceSize() == ty->getSizeInBits(); } return false; } static void LegalizeDbgFragmentsForDbgIntrinsic(Function *f, DITypeIdentifierMap &TypeIdentifierMap) { Intrinsic::ID intrinsic = f->getIntrinsicID(); DIBuilder dib(*f->getParent()); if (intrinsic == Intrinsic::dbg_value) { for (auto it = f->user_begin(), end = f->user_end(); it != end;) { User *u = *(it++); DbgValueInst *di = cast<DbgValueInst>(u); Value *value = di->getValue(); if (!value) { di->eraseFromParent(); continue; } DIExpression *expr = di->getExpression(); DILocalVariable *var = di->getVariable(); if (BitPieceCoversEntireVar(expr, var, TypeIdentifierMap)) { dib.insertDbgValueIntrinsic(value, 0, var, DIExpression::get(di->getContext(), {}), di->getDebugLoc(), di); di->eraseFromParent(); } } } else if (intrinsic == Intrinsic::dbg_declare) { for (auto it = f->user_begin(), end = f->user_end(); it != end;) { User *u = *(it++); DbgDeclareInst *di = cast<DbgDeclareInst>(u); Value *addr = di->getAddress(); if (!addr) { di->eraseFromParent(); continue; } DIExpression *expr = di->getExpression(); DILocalVariable *var = di->getVariable(); if (BitPieceCoversEntireVar(expr, var, TypeIdentifierMap)) { dib.insertDeclare(addr, var, DIExpression::get(di->getContext(), {}), di->getDebugLoc(), di); di->eraseFromParent(); } } } } static void LegalizeDbgFragments(Module &M) { DITypeIdentifierMap TypeIdentifierMap; if (Function *f = M.getFunction(Intrinsic::getName(Intrinsic::dbg_value))) { LegalizeDbgFragmentsForDbgIntrinsic(f, TypeIdentifierMap); } if (Function *f = M.getFunction(Intrinsic::getName(Intrinsic::dbg_declare))) { LegalizeDbgFragmentsForDbgIntrinsic(f, TypeIdentifierMap); } } void RemoveStoreUndefOutput(Module &M, hlsl::OP *hlslOP) { for (iplist<Function>::iterator F : M.getFunctionList()) { if (!hlslOP->IsDxilOpFunc(F)) continue; DXIL::OpCodeClass opClass; bool bHasOpClass = hlslOP->GetOpCodeClass(F, opClass); DXASSERT_LOCALVAR(bHasOpClass, bHasOpClass, "else not a dxil op func"); if (opClass != DXIL::OpCodeClass::StoreOutput) continue; for (auto it = F->user_begin(); it != F->user_end();) { CallInst *CI = dyn_cast<CallInst>(*(it++)); if (!CI) continue; Value *V = CI->getArgOperand(DXIL::OperandIndex::kStoreOutputValOpIdx); // Remove the store of undef. if (isa<UndefValue>(V)) CI->eraseFromParent(); } } } void LegalizeSharedMemoryGEPInbound(Module &M) { const DataLayout &DL = M.getDataLayout(); // Clear inbound for GEP which has none-const index. 
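  // For example (illustrative HLSL): with 'groupshared float g[64];', a GEP
  // for g[i] with a non-constant i loses the inbounds flag, as does a
  // constant-index GEP whose computed offset reaches past the allocation.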
  for (GlobalVariable &GV : M.globals()) {
    if (dxilutil::IsSharedMemoryGlobal(&GV)) {
      CheckInBoundForTGSM(GV, DL);
    }
  }
}

void StripEntryParameters(Module &M, DxilModule &DM, bool IsLib) {
  DenseMap<const Function *, DISubprogram *> FunctionDIs =
      makeSubprogramMap(M);
  // Strip parameters of entry function.
  if (!IsLib) {
    if (Function *OldPatchConstantFunc = DM.GetPatchConstantFunction()) {
      Function *NewPatchConstantFunc =
          StripFunctionParameter(OldPatchConstantFunc, DM, FunctionDIs);
      if (NewPatchConstantFunc) {
        DM.SetPatchConstantFunction(NewPatchConstantFunc);
        // Erase once the DxilModule doesn't track the old function anymore.
        DXASSERT(DM.IsPatchConstantShader(NewPatchConstantFunc) &&
                     !DM.IsPatchConstantShader(OldPatchConstantFunc),
                 "Error while migrating to parameter-stripped patch constant "
                 "function.");
        OldPatchConstantFunc->eraseFromParent();
      }
    }

    if (Function *OldEntryFunc = DM.GetEntryFunction()) {
      StringRef Name = DM.GetEntryFunctionName();
      OldEntryFunc->setName(Name);
      Function *NewEntryFunc =
          StripFunctionParameter(OldEntryFunc, DM, FunctionDIs);
      if (NewEntryFunc) {
        DM.SetEntryFunction(NewEntryFunc);
        OldEntryFunc->eraseFromParent();
      }
    }
  } else {
    std::vector<Function *> entries;

    // Handle the case where multiple hull shaders point to the same patch
    // constant function.
    MapVector<Function *, llvm::SmallVector<Function *, 2>>
        PatchConstantFuncUsers;
    for (iplist<Function>::iterator F : M.getFunctionList()) {
      if (DM.IsEntryThatUsesSignatures(F)) {
        auto *FT = F->getFunctionType();
        // Only strip when the function has parameters or a non-void return.
        if (FT->getNumParams() > 0 || !FT->getReturnType()->isVoidTy()) {
          entries.emplace_back(F);
        }

        DxilFunctionProps &props = DM.GetDxilFunctionProps(F);
        if (props.IsHS() && props.ShaderProps.HS.patchConstantFunc) {
          FunctionType *PatchConstantFuncTy =
              props.ShaderProps.HS.patchConstantFunc->getFunctionType();
          if (PatchConstantFuncTy->getNumParams() > 0 ||
              !PatchConstantFuncTy->getReturnType()->isVoidTy()) {
            // Accumulate all hull shaders using a given patch constant
            // function, so we can update it once and fix all hull shaders,
            // without having an intermediary state where some hull shaders
            // point to a destroyed patch constant function.
PatchConstantFuncUsers[props.ShaderProps.HS.patchConstantFunc] .emplace_back(F); } } } } // Strip patch constant functions first for (auto &PatchConstantFuncEntry : PatchConstantFuncUsers) { Function *OldPatchConstantFunc = PatchConstantFuncEntry.first; Function *NewPatchConstantFunc = StripFunctionParameter(OldPatchConstantFunc, DM, FunctionDIs); if (NewPatchConstantFunc) { // Update all user hull shaders for (Function *HullShaderFunc : PatchConstantFuncEntry.second) DM.SetPatchConstantFunctionForHS(HullShaderFunc, NewPatchConstantFunc); // Erase once the DxilModule doesn't track the old function anymore DXASSERT(DM.IsPatchConstantShader(NewPatchConstantFunc) && !DM.IsPatchConstantShader(OldPatchConstantFunc), "Error while migrating to parameter-stripped patch constant " "function."); OldPatchConstantFunc->eraseFromParent(); } } for (Function *OldEntry : entries) { Function *NewEntry = StripFunctionParameter(OldEntry, DM, FunctionDIs); if (NewEntry) OldEntry->eraseFromParent(); } } } void AddFunctionAnnotationForInitializers(Module &M, DxilModule &DM) { if (GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors")) { if (isa<ConstantAggregateZero>(GV->getInitializer())) { DXASSERT_NOMSG(GV->user_empty()); GV->eraseFromParent(); return; } ConstantArray *init = cast<ConstantArray>(GV->getInitializer()); for (auto V : init->operand_values()) { if (isa<ConstantAggregateZero>(V)) continue; ConstantStruct *CS = cast<ConstantStruct>(V); if (isa<ConstantPointerNull>(CS->getOperand(1))) continue; Function *F = cast<Function>(CS->getOperand(1)); if (DM.GetTypeSystem().GetFunctionAnnotation(F) == nullptr) DM.GetTypeSystem().AddFunctionAnnotation(F); } } } void RemoveUnusedRayQuery(Module &M) { hlsl::OP *hlslOP = M.GetDxilModule().GetOP(); llvm::Function *AllocFn = hlslOP->GetOpFunc( DXIL::OpCode::AllocateRayQuery, Type::getVoidTy(M.getContext())); SmallVector<CallInst *, 4> DeadInsts; for (auto U : AllocFn->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { if (CI->user_empty()) { DeadInsts.emplace_back(CI); } } } for (auto CI : DeadInsts) { CI->eraseFromParent(); } if (AllocFn->user_empty()) { AllocFn->eraseFromParent(); } } // Convert all uses of dx.break() into per-function load/cmp of dx.break.cond // global constant void LowerDxBreak(Module &M) { if (Function *BreakFunc = M.getFunction(DXIL::kDxBreakFuncName)) { if (!BreakFunc->use_empty()) { llvm::Type *i32Ty = llvm::Type::getInt32Ty(M.getContext()); Type *i32ArrayTy = ArrayType::get(i32Ty, 1); unsigned int Values[1] = {0}; Constant *InitialValue = ConstantDataArray::get(M.getContext(), Values); Constant *GV = new GlobalVariable(M, i32ArrayTy, true, GlobalValue::InternalLinkage, InitialValue, DXIL::kDxBreakCondName); Constant *Indices[] = {ConstantInt::get(i32Ty, 0), ConstantInt::get(i32Ty, 0)}; Constant *Gep = ConstantExpr::getGetElementPtr(nullptr, GV, Indices); SmallDenseMap<llvm::Function *, llvm::ICmpInst *, 16> DxBreakCmpMap; // Replace all uses of dx.break with references to the constant global for (auto I = BreakFunc->user_begin(), E = BreakFunc->user_end(); I != E;) { User *U = *I++; CallInst *CI = cast<CallInst>(U); Function *F = CI->getParent()->getParent(); ICmpInst *Cmp = DxBreakCmpMap.lookup(F); if (!Cmp) { Instruction *IP = dxilutil::FindAllocaInsertionPt(F); LoadInst *LI = new LoadInst(Gep, nullptr, false, IP); Cmp = new ICmpInst(IP, ICmpInst::ICMP_EQ, LI, llvm::ConstantInt::get(i32Ty, 0)); DxBreakCmpMap[F] = Cmp; } CI->replaceAllUsesWith(Cmp); CI->eraseFromParent(); } } BreakFunc->eraseFromParent(); } for (Function &F 
: M) { for (BasicBlock &BB : F) { if (BranchInst *BI = dyn_cast<BranchInst>(BB.getTerminator())) { BI->setMetadata(DXIL::kDxBreakMDName, nullptr); } } } } }; } // namespace char DxilFinalizeModule::ID = 0; ModulePass *llvm::createDxilFinalizeModulePass() { return new DxilFinalizeModule(); } INITIALIZE_PASS(DxilFinalizeModule, "hlsl-dxilfinalize", "HLSL DXIL Finalize Module", false, false) /////////////////////////////////////////////////////////////////////////////// namespace { typedef MapVector<PHINode *, SmallVector<Value *, 8>> PHIReplacementMap; bool RemoveAddrSpaceCasts(Value *Val, Value *NewVal, PHIReplacementMap &phiReplacements, DenseMap<Value *, Value *> &valueMap) { bool bChanged = false; for (auto itU = Val->use_begin(), itEnd = Val->use_end(); itU != itEnd;) { Use &use = *(itU++); User *user = use.getUser(); Value *userReplacement = user; bool bConstructReplacement = false; bool bCleanupInst = false; auto valueMapIter = valueMap.find(user); if (valueMapIter != valueMap.end()) userReplacement = valueMapIter->second; else if (Val != NewVal) bConstructReplacement = true; if (ConstantExpr *CE = dyn_cast<ConstantExpr>(user)) { if (CE->getOpcode() == Instruction::BitCast) { if (bConstructReplacement) { // Replicate bitcast in target address space Type *NewTy = PointerType::get(CE->getType()->getPointerElementType(), NewVal->getType()->getPointerAddressSpace()); userReplacement = ConstantExpr::getBitCast(cast<Constant>(NewVal), NewTy); } } else if (CE->getOpcode() == Instruction::GetElementPtr) { if (bConstructReplacement) { // Replicate GEP in target address space GEPOperator *GEP = cast<GEPOperator>(CE); SmallVector<Value *, 8> idxList(GEP->idx_begin(), GEP->idx_end()); userReplacement = ConstantExpr::getGetElementPtr( nullptr, cast<Constant>(NewVal), idxList, GEP->isInBounds()); } } else if (CE->getOpcode() == Instruction::AddrSpaceCast) { userReplacement = NewVal; bConstructReplacement = false; } else { DXASSERT(false, "RemoveAddrSpaceCasts: unhandled pointer ConstantExpr"); } } else if (Instruction *I = dyn_cast<Instruction>(user)) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) { if (bConstructReplacement) { IRBuilder<> Builder(GEP); SmallVector<Value *, 8> idxList(GEP->idx_begin(), GEP->idx_end()); if (GEP->isInBounds()) userReplacement = Builder.CreateInBoundsGEP(NewVal, idxList, GEP->getName()); else userReplacement = Builder.CreateGEP(NewVal, idxList, GEP->getName()); } } else if (BitCastInst *BC = dyn_cast<BitCastInst>(user)) { if (bConstructReplacement) { IRBuilder<> Builder(BC); Type *NewTy = PointerType::get(BC->getType()->getPointerElementType(), NewVal->getType()->getPointerAddressSpace()); userReplacement = Builder.CreateBitCast(NewVal, NewTy); } } else if (PHINode *PHI = dyn_cast<PHINode>(user)) { // set replacement phi values for PHI pass unsigned numValues = PHI->getNumIncomingValues(); auto &phiValues = phiReplacements[PHI]; if (phiValues.empty()) phiValues.resize(numValues, nullptr); for (unsigned idx = 0; idx < numValues; ++idx) { if (phiValues[idx] == nullptr && PHI->getIncomingValue(idx) == Val) { phiValues[idx] = NewVal; bChanged = true; } } continue; } else if (isa<AddrSpaceCastInst>(user)) { userReplacement = NewVal; bConstructReplacement = false; bCleanupInst = true; } else if (isa<CallInst>(user)) { continue; } else { if (Val != NewVal) { use.set(NewVal); bChanged = true; } continue; } } if (bConstructReplacement && user != userReplacement) valueMap[user] = userReplacement; bChanged |= RemoveAddrSpaceCasts(user, userReplacement, 
phiReplacements, valueMap); if (bCleanupInst && user->use_empty()) { // Clean up old instruction if it's now unused. // Safe during this use iteration when only one use of V in instruction. if (Instruction *I = dyn_cast<Instruction>(user)) I->eraseFromParent(); bChanged = true; } } return bChanged; } } // namespace bool CleanupSharedMemoryAddrSpaceCast(Module &M) { bool bChanged = false; // Eliminate address space casts if possible // Collect phi nodes so we can replace iteratively after pass over GVs PHIReplacementMap phiReplacements; DenseMap<Value *, Value *> valueMap; for (GlobalVariable &GV : M.globals()) { if (dxilutil::IsSharedMemoryGlobal(&GV)) { bChanged |= RemoveAddrSpaceCasts(&GV, &GV, phiReplacements, valueMap); } } bool bConverged = false; while (!phiReplacements.empty() && !bConverged) { bConverged = true; for (auto &phiReplacement : phiReplacements) { PHINode *PHI = phiReplacement.first; unsigned origAddrSpace = PHI->getType()->getPointerAddressSpace(); unsigned incomingAddrSpace = UINT_MAX; bool bReplacePHI = true; bool bRemovePHI = false; for (auto V : phiReplacement.second) { if (nullptr == V) { // cannot replace phi (yet) bReplacePHI = false; break; } unsigned addrSpace = V->getType()->getPointerAddressSpace(); if (incomingAddrSpace == UINT_MAX) { incomingAddrSpace = addrSpace; } else if (addrSpace != incomingAddrSpace) { bRemovePHI = true; break; } } if (origAddrSpace == incomingAddrSpace) bRemovePHI = true; if (bRemovePHI) { // Cannot replace phi. Remove it and restart. phiReplacements.erase(PHI); bConverged = false; break; } if (!bReplacePHI) continue; auto &NewVal = valueMap[PHI]; PHINode *NewPHI = nullptr; if (NewVal) { NewPHI = cast<PHINode>(NewVal); } else { IRBuilder<> Builder(PHI); NewPHI = Builder.CreatePHI( PointerType::get(PHI->getType()->getPointerElementType(), incomingAddrSpace), PHI->getNumIncomingValues(), PHI->getName()); NewVal = NewPHI; for (unsigned idx = 0; idx < PHI->getNumIncomingValues(); idx++) { NewPHI->addIncoming(phiReplacement.second[idx], PHI->getIncomingBlock(idx)); } } if (RemoveAddrSpaceCasts(PHI, NewPHI, phiReplacements, valueMap)) { bConverged = false; bChanged = true; break; } if (PHI->use_empty()) { phiReplacements.erase(PHI); bConverged = false; bChanged = true; break; } } } // Cleanup unused replacement instructions SmallVector<WeakTrackingVH, 8> cleanupInsts; for (auto it : valueMap) { if (isa<Instruction>(it.first)) cleanupInsts.push_back(it.first); if (isa<Instruction>(it.second)) cleanupInsts.push_back(it.second); } for (auto V : cleanupInsts) { if (!V) continue; if (PHINode *PHI = dyn_cast<PHINode>(V)) RecursivelyDeleteDeadPHINode(PHI); else if (Instruction *I = dyn_cast<Instruction>(V)) RecursivelyDeleteTriviallyDeadInstructions(I); } return bChanged; } class DxilCleanupAddrSpaceCast : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit DxilCleanupAddrSpaceCast() : ModulePass(ID) {} StringRef getPassName() const override { return "HLSL DXIL Cleanup Address Space Cast"; } bool runOnModule(Module &M) override { return CleanupSharedMemoryAddrSpaceCast(M); } }; char DxilCleanupAddrSpaceCast::ID = 0; ModulePass *llvm::createDxilCleanupAddrSpaceCastPass() { return new DxilCleanupAddrSpaceCast(); } INITIALIZE_PASS(DxilCleanupAddrSpaceCast, "hlsl-dxil-cleanup-addrspacecast", "HLSL DXIL Cleanup Address Space Cast", false, false) /////////////////////////////////////////////////////////////////////////////// namespace { class DxilEmitMetadata : public ModulePass { public: static char ID; 
  // Pass identification, replacement for typeid
  explicit DxilEmitMetadata() : ModulePass(ID) {}

  StringRef getPassName() const override { return "HLSL DXIL Metadata Emit"; }

  bool runOnModule(Module &M) override {
    if (M.HasDxilModule()) {
      DxilModule::ClearDxilMetadata(M);
      patchIsFrontfaceTy(M);
      M.GetDxilModule().EmitDxilMetadata();
      return true;
    }
    return false;
  }

private:
  void patchIsFrontfaceTy(Module &M);
};

void patchIsFrontface(DxilSignatureElement &Elt, bool bForceUint) {
  // If forced to uint, change i1 to u32.
  // If not forced to uint, change u32 to i1.
  if (bForceUint && Elt.GetCompType() == CompType::Kind::I1)
    Elt.SetCompType(CompType::Kind::U32);
  else if (!bForceUint && Elt.GetCompType() == CompType::Kind::U32)
    Elt.SetCompType(CompType::Kind::I1);
}

void patchIsFrontface(DxilSignature &sig, bool bForceUint) {
  for (auto &Elt : sig.GetElements()) {
    if (Elt->GetSemantic()->GetKind() == Semantic::Kind::IsFrontFace) {
      patchIsFrontface(*Elt, bForceUint);
    }
  }
}

void DxilEmitMetadata::patchIsFrontfaceTy(Module &M) {
  DxilModule &DM = M.GetDxilModule();
  const ShaderModel *pSM = DM.GetShaderModel();
  if (!pSM->IsGS() && !pSM->IsPS())
    return;
  unsigned ValMajor, ValMinor;
  DM.GetValidatorVersion(ValMajor, ValMinor);
  bool bForceUint = ValMajor == 0 || (ValMajor >= 1 && ValMinor >= 2);
  if (pSM->IsPS()) {
    patchIsFrontface(DM.GetInputSignature(), bForceUint);
  } else if (pSM->IsGS()) {
    patchIsFrontface(DM.GetOutputSignature(), bForceUint);
  }
}
} // namespace

char DxilEmitMetadata::ID = 0;

ModulePass *llvm::createDxilEmitMetadataPass() {
  return new DxilEmitMetadata();
}

INITIALIZE_PASS(DxilEmitMetadata, "hlsl-dxilemit", "HLSL DXIL Metadata Emit",
                false, false)

///////////////////////////////////////////////////////////////////////////////

namespace {
const StringRef UniNoWaveSensitiveGradientErrMsg =
    "Gradient operations are not affected by wave-sensitive data or control "
    "flow.";

class DxilValidateWaveSensitivity : public ModulePass {
public:
  static char ID;
  // Pass identification, replacement for typeid
  explicit DxilValidateWaveSensitivity() : ModulePass(ID) {}

  StringRef getPassName() const override {
    return "HLSL DXIL wave sensitivity validation";
  }

  bool runOnModule(Module &M) override {
    // Only check ps and lib profile.
    DxilModule &DM = M.GetDxilModule();
    const ShaderModel *pSM = DM.GetShaderModel();
    if (!pSM->IsPS() && !pSM->IsLib())
      return false;

    SmallVector<CallInst *, 16> gradientOps;
    SmallVector<CallInst *, 16> barriers;
    SmallVector<CallInst *, 16> waveOps;

    for (auto &F : M) {
      if (!F.isDeclaration())
        continue;

      for (User *U : F.users()) {
        CallInst *CI = dyn_cast<CallInst>(U);
        if (!CI)
          continue;
        Function *FCalled = CI->getCalledFunction();
        if (!FCalled || !FCalled->isDeclaration())
          continue;
        if (!hlsl::OP::IsDxilOpFunc(FCalled))
          continue;

        DXIL::OpCode dxilOpcode = hlsl::OP::GetDxilOpFuncCallInst(CI);

        if (OP::IsDxilOpWave(dxilOpcode)) {
          waveOps.emplace_back(CI);
        }
        if (OP::IsDxilOpGradient(dxilOpcode)) {
          gradientOps.push_back(CI);
        }
        if (dxilOpcode == DXIL::OpCode::Barrier) {
          barriers.push_back(CI);
        }
      }
    }

    // Skip if there are no wave ops.
    if (waveOps.empty())
      return false;

    // Skip if there are no gradient ops.
    if (gradientOps.empty())
      return false;

    for (auto &F : M) {
      if (F.isDeclaration())
        continue;

      SetVector<Instruction *> localGradientArgs;
      for (CallInst *CI : gradientOps) {
        if (CI->getParent()->getParent() == &F) {
          for (Value *V : CI->arg_operands()) {
            // TODO: only check operands that are used for the gradient
            // calculation.
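            // (Currently every instruction operand of the gradient op is
            // conservatively treated as a gradient input.)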
            Instruction *vI = dyn_cast<Instruction>(V);
            if (!vI)
              continue;
            localGradientArgs.insert(vI);
          }
        }
      }

      if (localGradientArgs.empty())
        continue;

      PostDominatorTree PDT;
      PDT.runOnFunction(F);
      std::unique_ptr<WaveSensitivityAnalysis> WaveVal(
          WaveSensitivityAnalysis::create(PDT));

      WaveVal->Analyze(&F);
      for (Instruction *gradArg : localGradientArgs) {
        // Check the operands of gradient ops, not the gradient ops themselves.
        if (WaveVal->IsWaveSensitive(gradArg)) {
          dxilutil::EmitWarningOnInstruction(gradArg,
                                             UniNoWaveSensitiveGradientErrMsg);
        }
      }
    }
    return false;
  }
};

} // namespace

char DxilValidateWaveSensitivity::ID = 0;

ModulePass *llvm::createDxilValidateWaveSensitivityPass() {
  return new DxilValidateWaveSensitivity();
}

INITIALIZE_PASS(DxilValidateWaveSensitivity, "hlsl-validate-wave-sensitivity",
                "HLSL DXIL wave sensitivity validation", false, false)

namespace {

// Cull blocks from BreakBBs that contain instructions that are sensitive to
// the wave-sensitive Inst. Sensitivity entails being an eventual user of the
// Inst and also belonging to a block with a break conditional on dx.break
// that breaks out of a loop that contains WaveCI.
// LInfo is needed to determine loop contents. Visited is needed to prevent
// infinite looping.
static void CullSensitiveBlocks(
    LoopInfo *LInfo, Loop *WaveLoop, BasicBlock *LastBB, Instruction *Inst,
    std::unordered_set<Instruction *> &Visited,
    SmallDenseMap<BasicBlock *, Instruction *, 16> &BreakBBs) {
  BasicBlock *BB = Inst->getParent();
  Loop *BreakLoop = LInfo->getLoopFor(BB);
  // If this instruction isn't in a loop, there is no need to track its
  // sensitivity further.
  if (!BreakLoop || BreakBBs.empty())
    return;

  // To prevent infinite looping, only visit each instruction once.
  if (!Visited.insert(Inst).second)
    return;

  // If this BB wasn't already just processed, handle it now.
  if (LastBB != BB) {
    // Determine if the instruction's block has an artificially-conditional
    // break and breaks out of a loop that contains the waveCI.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (BI && BI->isConditional() && BreakLoop->contains(WaveLoop))
      BreakBBs.erase(BB);
  }

  // Recurse on the users.
  for (User *U : Inst->users()) {
    Instruction *I = cast<Instruction>(U);
    CullSensitiveBlocks(LInfo, WaveLoop, BB, I, Visited, BreakBBs);
  }
}

// Collect blocks that end in a dx.break dependent branch by tracing the
// descendants of BreakFunc that are found in ThisFunc, and store the block
// and call instruction in BreakBBs.
static void CollectBreakBlocks(
    Function *BreakFunc, Function *ThisFunc,
    SmallDenseMap<BasicBlock *, Instruction *, 16> &BreakBBs) {
  for (User *U : BreakFunc->users()) {
    SmallVector<User *, 16> WorkList;
    Instruction *CI = cast<Instruction>(U);
    // If this user doesn't pertain to the current function, skip it.
    if (CI->getParent()->getParent() != ThisFunc)
      continue;
    WorkList.append(CI->user_begin(), CI->user_end());
    while (!WorkList.empty()) {
      Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
      // When we find a branch that depends on dx.break, save it and stop.
      // This should almost always be the first user of the call instruction.
      // If not, iterate on the users.
      if (BranchInst *BI = dyn_cast<BranchInst>(I))
        BreakBBs[BI->getParent()] = CI;
      else
        WorkList.append(I->user_begin(), I->user_end());
    }
  }
}

// A pass to remove the artificial condition from dx.break branches that do
// not depend on wave operations performed inside the loop the break leaves.
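// Illustrative pattern (a sketch; value names abbreviated): a loop exit is
// compiled as
//   %b = call i1 @dx.break()
//   br i1 %b, label %exit, label %body
// and when no wave-sensitive value feeds such a break, this pass rewrites the
// dx.break() call to 'i1 true', restoring an unconditional break.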
class CleanupDxBreak : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit CleanupDxBreak() : FunctionPass(ID) {}
  StringRef getPassName() const override {
    return "HLSL Remove unnecessary dx.break conditions";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LoopInfoWrapperPass>();
  }

  LoopInfo *LInfo;

  bool runOnFunction(Function &F) override {
    if (F.isDeclaration())
      return false;
    Module *M = F.getEntryBlock().getModule();
    Function *BreakFunc = M->getFunction(DXIL::kDxBreakFuncName);
    if (!BreakFunc)
      return false;

    LInfo = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

    // Collect the blocks that depend on dx.break and the instructions that
    // call dx.break().
    SmallDenseMap<BasicBlock *, Instruction *, 16> BreakBBs;
    CollectBreakBlocks(BreakFunc, &F, BreakBBs);

    if (BreakBBs.empty())
      return false;

    // Collect all wave calls in this function and group them by loop.
    SmallDenseMap<Loop *, SmallVector<CallInst *, 8>, 16> WaveCalls;
    for (Function &IF : M->functions()) {
      HLOpcodeGroup opgroup = hlsl::GetHLOpcodeGroup(&IF);
      // Only consider wave-sensitive intrinsics or extintrinsics.
      if (IF.isDeclaration() && IsHLWaveSensitive(&IF) && !BreakBBs.empty() &&
          (opgroup == HLOpcodeGroup::HLIntrinsic ||
           opgroup == HLOpcodeGroup::HLExtIntrinsic)) {
        // For each user of the function, trace all its users to remove the
        // blocks.
        for (User *U : IF.users()) {
          CallInst *CI = cast<CallInst>(U);
          if (CI->getParent()->getParent() == &F) {
            Loop *WaveLoop = LInfo->getLoopFor(CI->getParent());
            WaveCalls[WaveLoop].emplace_back(CI);
          }
        }
      }
    }

    // For each wave operation, remove all the dx.break blocks that are
    // sensitive to it.
    for (DenseMap<Loop *, SmallVector<CallInst *, 8>>::iterator
             I = WaveCalls.begin(),
             E = WaveCalls.end();
         I != E; ++I) {
      Loop *loop = I->first;
      std::unordered_set<Instruction *> Visited;
      for (CallInst *CI : I->second) {
        CullSensitiveBlocks(LInfo, loop, nullptr, CI, Visited, BreakBBs);
      }
    }

    bool Changed = false;
    // Revert artificially conditional breaks in non-wave-sensitive blocks
    // that remain in BreakBBs.
    Constant *C = ConstantInt::get(Type::getInt1Ty(M->getContext()), 1);
    for (auto &BB : BreakBBs) {
      // Replace the call instruction with a constant boolean.
      BB.second->replaceAllUsesWith(C);
      BB.second->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
};
} // namespace

char CleanupDxBreak::ID = 0;

INITIALIZE_PASS_BEGIN(CleanupDxBreak, "hlsl-cleanup-dxbreak",
                      "HLSL Remove unnecessary dx.break conditions", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(CleanupDxBreak, "hlsl-cleanup-dxbreak",
                    "HLSL Remove unnecessary dx.break conditions", false,
                    false)

FunctionPass *llvm::createCleanupDxBreakPass() { return new CleanupDxBreak(); }

///////////////////////////////////////////////////////////////////////////////

namespace {
class DxilModuleInit : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit DxilModuleInit() : ModulePass(ID) {}

  StringRef getPassName() const override {
    return "Create DXIL Module for opt tests";
  }

  bool runOnModule(Module &M) override {
    M.GetOrCreateDxilModule();
    return true;
  }
};
} // namespace

char DxilModuleInit::ID = 0;

ModulePass *llvm::createDxilModuleInitPass() { return new DxilModuleInit(); }

INITIALIZE_PASS(DxilModuleInit, "hlsl-dxil-module-init",
                "Create DXIL Module for opt tests", false, false)

///////////////////////////////////////////////////////////////////////////////
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilPreserveAllOutputs.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilPreserveAllOutputs.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Ensure we store to all elements in the output signature. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilInstructions.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilSignatureElement.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/Support/Global.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include <llvm/ADT/DenseSet.h> using namespace llvm; using namespace hlsl; namespace { class OutputWrite { public: explicit OutputWrite(CallInst *call) : m_Call(call) { assert(DxilInst_StoreOutput(call) || DxilInst_StoreVertexOutput(call) || DxilInst_StorePrimitiveOutput(call) || DxilInst_StorePatchConstant(call)); } unsigned GetSignatureID() const { Value *id = m_Call->getOperand(SignatureIndex); return cast<ConstantInt>(id)->getLimitedValue(); } DxilSignatureElement &GetSignatureElement(DxilModule &DM) const { if (DxilInst_StorePatchConstant(m_Call) || DxilInst_StorePrimitiveOutput(m_Call)) return DM.GetPatchConstOrPrimSignature().GetElement(GetSignatureID()); else return DM.GetOutputSignature().GetElement(GetSignatureID()); } CallInst *GetStore() const { return m_Call; } Value *GetValue() const { return m_Call->getOperand(ValueIndex); } Value *GetRow() const { return m_Call->getOperand(RowIndex); } Value *GetColumn() const { return m_Call->getOperand(ColumnIndex); } void DeleteStore() { m_Call->eraseFromParent(); m_Call = nullptr; } private: CallInst *m_Call; enum OperandIndex { SignatureIndex = 1, RowIndex = 2, ColumnIndex = 3, ValueIndex = 4, }; }; class OutputElement { public: explicit OutputElement(const DxilSignatureElement &outputElement) : m_OutputElement(outputElement), m_Rows(outputElement.GetRows()), m_Columns(outputElement.GetCols()) {} void CreateAlloca(IRBuilder<> &allocaBuilder) { LLVMContext &context = allocaBuilder.getContext(); Type *elementType = m_OutputElement.GetCompType().GetLLVMType(context); Type *allocaType = nullptr; if (IsSingleElement()) allocaType = elementType; else allocaType = ArrayType::get(elementType, NumElements()); m_Alloca = allocaBuilder.CreateAlloca(allocaType, nullptr, m_OutputElement.GetName()); } void StoreTemp(IRBuilder<> &builder, Value *row, Value *col, Value *value) const { Value *addr = GetTempAddr(builder, row, col); builder.CreateStore(value, addr); } void StoreOutput(IRBuilder<> &builder, DxilModule &DM) const { for (unsigned row = 0; row < m_Rows; ++row) for (unsigned col = 0; col < m_Columns; ++col) { StoreOutput(builder, DM, row, col); } } unsigned NumElements() const { return m_Rows * m_Columns; } private: const DxilSignatureElement &m_OutputElement; unsigned m_Rows; unsigned m_Columns; AllocaInst *m_Alloca; bool IsSingleElement() const { return m_Rows == 1 && m_Columns == 1; } Value *GetAsI32(IRBuilder<> &builder, Value *col) const { assert(col->getType()->isIntegerTy()); Type *i32Ty = builder.getInt32Ty(); if (col->getType() != i32Ty) { if (col->getType()->getScalarSizeInBits() > i32Ty->getScalarSizeInBits()) col = builder.CreateTrunc(col, i32Ty); else col = builder.CreateZExt(col, i32Ty); } return col; } Value *GetTempAddr(IRBuilder<> &builder, 
Value *row, Value *col) const { // Load directly from alloca for non-array output. if (IsSingleElement()) return m_Alloca; else return CreateGEP(builder, row, col); } Value *CreateGEP(IRBuilder<> &builder, Value *row, Value *col) const { assert(m_Alloca); Constant *rowStride = ConstantInt::get(row->getType(), m_Columns); Value *rowOffset = builder.CreateMul(row, rowStride); Value *index = builder.CreateAdd(rowOffset, GetAsI32(builder, col)); return builder.CreateInBoundsGEP(m_Alloca, {builder.getInt32(0), index}); } Value *LoadTemp(IRBuilder<> &builder, Value *row, Value *col) const { Value *addr = GetTempAddr(builder, row, col); return builder.CreateLoad(addr); } void StoreOutput(IRBuilder<> &builder, DxilModule &DM, unsigned row, unsigned col) const { Value *opcodeV = builder.getInt32(static_cast<unsigned>(GetOutputOpCode())); Value *sigID = builder.getInt32(m_OutputElement.GetID()); Value *rowV = builder.getInt32(row); Value *colV = builder.getInt8(col); Value *val = LoadTemp(builder, rowV, colV); Value *args[] = {opcodeV, sigID, rowV, colV, val}; Function *Store = GetOutputFunction(DM); builder.CreateCall(Store, args); } DXIL::OpCode GetOutputOpCode() const { if (m_OutputElement.IsPatchConstOrPrim()) { if (m_OutputElement.GetSigPointKind() == DXIL::SigPointKind::PCOut) return DXIL::OpCode::StorePatchConstant; else { assert(m_OutputElement.GetSigPointKind() == DXIL::SigPointKind::MSPOut); return DXIL::OpCode::StorePrimitiveOutput; } } else return DXIL::OpCode::StoreOutput; } Function *GetOutputFunction(DxilModule &DM) const { hlsl::OP *opInfo = DM.GetOP(); return opInfo->GetOpFunc( GetOutputOpCode(), m_OutputElement.GetCompType().GetLLVMBaseType(DM.GetCtx())); } }; class DxilPreserveAllOutputs : public FunctionPass { private: public: static char ID; // Pass identification, replacement for typeid DxilPreserveAllOutputs() : FunctionPass(ID) {} StringRef getPassName() const override { return "DXIL preserve all outputs"; } bool runOnFunction(Function &F) override; private: typedef std::vector<OutputWrite> OutputVec; typedef std::map<unsigned, OutputElement> OutputMap; OutputVec collectOutputStores(Function &F); OutputMap generateOutputMap(const OutputVec &calls, DxilModule &DM); void createTempAllocas(OutputMap &map, IRBuilder<> &builder); void insertTempOutputStores(const OutputVec &calls, const OutputMap &map, IRBuilder<> &builder); void insertFinalOutputStores(Function &F, const OutputMap &outputMap, IRBuilder<> &builder, DxilModule &DM); void removeOriginalOutputStores(OutputVec &outputStores); }; bool DxilPreserveAllOutputs::runOnFunction(Function &F) { DxilModule &DM = F.getParent()->GetOrCreateDxilModule(); OutputVec outputStores = collectOutputStores(F); if (outputStores.empty()) return false; IRBuilder<> builder(F.getEntryBlock().getFirstInsertionPt()); OutputMap outputMap = generateOutputMap(outputStores, DM); createTempAllocas(outputMap, builder); insertTempOutputStores(outputStores, outputMap, builder); insertFinalOutputStores(F, outputMap, builder, DM); removeOriginalOutputStores(outputStores); return false; } DxilPreserveAllOutputs::OutputVec DxilPreserveAllOutputs::collectOutputStores(Function &F) { OutputVec calls; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { Instruction *inst = &*I; DxilInst_StoreOutput storeOutput(inst); DxilInst_StoreVertexOutput storeVertexOutput(inst); DxilInst_StorePrimitiveOutput storePrimitiveOutput(inst); DxilInst_StorePatchConstant storePatch(inst); if (storeOutput || storeVertexOutput || storePrimitiveOutput || storePatch) 
calls.emplace_back(cast<CallInst>(inst)); } return calls; } DxilPreserveAllOutputs::OutputMap DxilPreserveAllOutputs::generateOutputMap(const OutputVec &calls, DxilModule &DM) { OutputMap map; for (const OutputWrite &output : calls) { unsigned sigID = output.GetSignatureID(); if (map.count(sigID)) continue; map.insert( std::make_pair(sigID, OutputElement(output.GetSignatureElement(DM)))); } return map; } void DxilPreserveAllOutputs::createTempAllocas(OutputMap &outputMap, IRBuilder<> &allocaBuilder) { for (auto &iter : outputMap) { OutputElement &output = iter.second; output.CreateAlloca(allocaBuilder); } } void DxilPreserveAllOutputs::insertTempOutputStores(const OutputVec &writes, const OutputMap &map, IRBuilder<> &builder) { for (const OutputWrite &outputWrite : writes) { OutputMap::const_iterator iter = map.find(outputWrite.GetSignatureID()); assert(iter != map.end()); const OutputElement &output = iter->second; builder.SetInsertPoint(outputWrite.GetStore()); output.StoreTemp(builder, outputWrite.GetRow(), outputWrite.GetColumn(), outputWrite.GetValue()); } } void DxilPreserveAllOutputs::insertFinalOutputStores(Function &F, const OutputMap &outputMap, IRBuilder<> &builder, DxilModule &DM) { // Find all return instructions. SmallVector<ReturnInst *, 4> returns; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { Instruction *inst = &*I; if (ReturnInst *ret = dyn_cast<ReturnInst>(inst)) returns.push_back(ret); } // Write all outputs before each return. for (ReturnInst *ret : returns) { for (const auto &iter : outputMap) { const OutputElement &output = iter.second; builder.SetInsertPoint(ret); output.StoreOutput(builder, DM); } } } void DxilPreserveAllOutputs::removeOriginalOutputStores( OutputVec &outputStores) { for (OutputWrite &write : outputStores) { write.DeleteStore(); } } } // namespace char DxilPreserveAllOutputs::ID = 0; FunctionPass *llvm::createDxilPreserveAllOutputsPass() { return new DxilPreserveAllOutputs(); } INITIALIZE_PASS(DxilPreserveAllOutputs, "hlsl-dxil-preserve-all-outputs", "DXIL preserve all outputs", false, false)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxilSimpleGVNHoist.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilSimpleGVNHoist.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // A simple version of GVN hoist for DXIL. // // Based on GVNHoist in LLVM 6.0. // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilOperations.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/IR/CFG.h" #include "llvm/IR/IntrinsicInst.h" #include "dxc/HLSL/DxilNoops.h" #include "llvm/Analysis/PostDominators.h" using namespace llvm; using namespace hlsl; /////////////////////////////////////////////////////////////////////////////// namespace { struct Expression { uint32_t opcode; Type *type; bool commutative = false; SmallVector<uint32_t, 4> varargs; Expression(uint32_t o = ~2U) : opcode(o) {} bool operator==(const Expression &other) const { if (opcode != other.opcode) return false; if (opcode == ~0U || opcode == ~1U) return true; if (type != other.type) return false; if (varargs != other.varargs) return false; return true; } friend hash_code hash_value(const Expression &Value) { return hash_combine( Value.opcode, Value.type, hash_combine_range(Value.varargs.begin(), Value.varargs.end())); } }; } // namespace namespace llvm { template <> struct DenseMapInfo<Expression> { static inline Expression getEmptyKey() { return ~0U; } static inline Expression getTombstoneKey() { return ~1U; } static unsigned getHashValue(const Expression &e) { using llvm::hash_value; return static_cast<unsigned>(hash_value(e)); } static bool isEqual(const Expression &LHS, const Expression &RHS) { return LHS == RHS; } }; } // namespace llvm namespace { // Simple Value table which support DXIL operation. class ValueTable { DenseMap<Value *, uint32_t> valueNumbering; DenseMap<Expression, uint32_t> expressionNumbering; // Expressions is the vector of Expression. ExprIdx is the mapping from // value number to the index of Expression in Expressions. We use it // instead of a DenseMap because filling such mapping is faster than // filling a DenseMap and the compile time is a little better. 
uint32_t nextExprNumber; std::vector<Expression> Expressions; std::vector<uint32_t> ExprIdx; DominatorTree *DT; uint32_t nextValueNumber = 1; Expression createExpr(Instruction *I); Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS); Expression createExtractvalueExpr(ExtractValueInst *EI); uint32_t lookupOrAddCall(CallInst *C); std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp); public: ValueTable(); ValueTable(const ValueTable &Arg); ValueTable(ValueTable &&Arg); ~ValueTable(); uint32_t lookupOrAdd(Value *V); uint32_t lookup(Value *V, bool Verify = true) const; uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS); bool exists(Value *V) const; void add(Value *V, uint32_t num); void clear(); void erase(Value *v); void setDomTree(DominatorTree *D) { DT = D; } uint32_t getNextUnusedValueNumber() { return nextValueNumber; } void verifyRemoved(const Value *) const; }; //===----------------------------------------------------------------------===// // ValueTable Internal Functions //===----------------------------------------------------------------------===// Expression ValueTable::createExpr(Instruction *I) { Expression e; e.type = I->getType(); e.opcode = I->getOpcode(); for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) e.varargs.push_back(lookupOrAdd(*OI)); if (I->isCommutative()) { // Ensure that commutative instructions that only differ by a permutation // of their operands get the same value number by sorting the operand value // numbers. Since all commutative instructions have two operands it is more // efficient to sort by hand rather than using, say, std::sort. assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!"); if (e.varargs[0] > e.varargs[1]) std::swap(e.varargs[0], e.varargs[1]); e.commutative = true; } if (CmpInst *C = dyn_cast<CmpInst>(I)) { // Sort the operand value numbers so x<y and y>x get the same value number. CmpInst::Predicate Predicate = C->getPredicate(); if (e.varargs[0] > e.varargs[1]) { std::swap(e.varargs[0], e.varargs[1]); Predicate = CmpInst::getSwappedPredicate(Predicate); } e.opcode = (C->getOpcode() << 8) | Predicate; e.commutative = true; } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) { for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); II != IE; ++II) e.varargs.push_back(*II); } return e; } Expression ValueTable::createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) { assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && "Not a comparison!"); Expression e; e.type = CmpInst::makeCmpResultType(LHS->getType()); e.varargs.push_back(lookupOrAdd(LHS)); e.varargs.push_back(lookupOrAdd(RHS)); // Sort the operand value numbers so x<y and y>x get the same value number. if (e.varargs[0] > e.varargs[1]) { std::swap(e.varargs[0], e.varargs[1]); Predicate = CmpInst::getSwappedPredicate(Predicate); } e.opcode = (Opcode << 8) | Predicate; e.commutative = true; return e; } Expression ValueTable::createExtractvalueExpr(ExtractValueInst *EI) { assert(EI && "Not an ExtractValueInst?"); Expression e; e.type = EI->getType(); e.opcode = 0; IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand()); if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) { // EI might be an extract from one of our recognised intrinsics. 
If it // is we'll synthesize a semantically equivalent expression instead on // an extract value expression. switch (I->getIntrinsicID()) { case Intrinsic::sadd_with_overflow: case Intrinsic::uadd_with_overflow: e.opcode = Instruction::Add; break; case Intrinsic::ssub_with_overflow: case Intrinsic::usub_with_overflow: e.opcode = Instruction::Sub; break; case Intrinsic::smul_with_overflow: case Intrinsic::umul_with_overflow: e.opcode = Instruction::Mul; break; default: break; } if (e.opcode != 0) { // Intrinsic recognized. Grab its args to finish building the expression. assert(I->getNumArgOperands() == 2 && "Expect two args for recognised intrinsics."); e.varargs.push_back(lookupOrAdd(I->getArgOperand(0))); e.varargs.push_back(lookupOrAdd(I->getArgOperand(1))); return e; } } // Not a recognised intrinsic. Fall back to producing an extract value // expression. e.opcode = EI->getOpcode(); for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end(); OI != OE; ++OI) e.varargs.push_back(lookupOrAdd(*OI)); for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end(); II != IE; ++II) e.varargs.push_back(*II); return e; } //===----------------------------------------------------------------------===// // ValueTable External Functions //===----------------------------------------------------------------------===// ValueTable::ValueTable() = default; ValueTable::ValueTable(const ValueTable &) = default; ValueTable::ValueTable(ValueTable &&) = default; ValueTable::~ValueTable() = default; /// add - Insert a value into the table with a specified value number. void ValueTable::add(Value *V, uint32_t num) { valueNumbering.insert(std::make_pair(V, num)); } uint32_t ValueTable::lookupOrAddCall(CallInst *C) { Function *F = C->getCalledFunction(); bool bSafe = false; if (F) { if (F->hasFnAttribute(Attribute::ReadNone)) { bSafe = true; } else if (F->hasFnAttribute(Attribute::ReadOnly)) { if (hlsl::OP::IsDxilOpFunc(F)) { DXIL::OpCode Opcode = hlsl::OP::GetDxilOpFuncCallInst(C); switch (Opcode) { default: break; // TODO: make buffer/texture load on srv safe. case DXIL::OpCode::CreateHandleForLib: case DXIL::OpCode::AnnotateHandle: case DXIL::OpCode::CBufferLoad: case DXIL::OpCode::CBufferLoadLegacy: case DXIL::OpCode::Sample: case DXIL::OpCode::SampleBias: case DXIL::OpCode::SampleCmp: case DXIL::OpCode::SampleCmpLevel: case DXIL::OpCode::SampleCmpLevelZero: case DXIL::OpCode::SampleGrad: case DXIL::OpCode::CheckAccessFullyMapped: case DXIL::OpCode::GetDimensions: case DXIL::OpCode::TextureLoad: case DXIL::OpCode::TextureGather: case DXIL::OpCode::TextureGatherCmp: case DXIL::OpCode::Texture2DMSGetSamplePosition: case DXIL::OpCode::RenderTargetGetSampleCount: case DXIL::OpCode::RenderTargetGetSamplePosition: case DXIL::OpCode::CalculateLOD: bSafe = true; break; } } } } if (bSafe) { Expression exp = createExpr(C); uint32_t e = assignExpNewValueNum(exp).first; valueNumbering[C] = e; return e; } else { // Not sure safe or not, always use new value number. valueNumbering[C] = nextValueNumber; return nextValueNumber++; } } /// Returns true if a value number exists for the specified value. bool ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; } /// lookup_or_add - Returns the value number for the specified value, assigning /// it a new number if it did not have one before. 
uint32_t ValueTable::lookupOrAdd(Value *V) { DenseMap<Value *, uint32_t>::iterator VI = valueNumbering.find(V); if (VI != valueNumbering.end()) return VI->second; if (!isa<Instruction>(V)) { valueNumbering[V] = nextValueNumber; return nextValueNumber++; } Instruction *I = cast<Instruction>(V); Expression exp; switch (I->getOpcode()) { case Instruction::Call: return lookupOrAddCall(cast<CallInst>(I)); case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: case Instruction::FSub: case Instruction::Mul: case Instruction::FMul: case Instruction::UDiv: case Instruction::SDiv: case Instruction::FDiv: case Instruction::URem: case Instruction::SRem: case Instruction::FRem: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::BitCast: case Instruction::Select: case Instruction::ExtractElement: case Instruction::InsertElement: case Instruction::ShuffleVector: case Instruction::InsertValue: case Instruction::GetElementPtr: exp = createExpr(I); break; case Instruction::ExtractValue: exp = createExtractvalueExpr(cast<ExtractValueInst>(I)); break; case Instruction::PHI: valueNumbering[V] = nextValueNumber; return nextValueNumber++; default: valueNumbering[V] = nextValueNumber; return nextValueNumber++; } uint32_t e = assignExpNewValueNum(exp).first; valueNumbering[V] = e; return e; } /// Returns the value number of the specified value. Fails if /// the value has not yet been numbered. uint32_t ValueTable::lookup(Value *V, bool Verify) const { DenseMap<Value *, uint32_t>::const_iterator VI = valueNumbering.find(V); if (Verify) { assert(VI != valueNumbering.end() && "Value not numbered?"); return VI->second; } return (VI != valueNumbering.end()) ? VI->second : 0; } /// Returns the value number of the given comparison, /// assigning it a new number if it did not have one before. Useful when /// we deduced the result of a comparison, but don't immediately have an /// instruction realizing that comparison to hand. uint32_t ValueTable::lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) { Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS); return assignExpNewValueNum(exp).first; } /// Remove all entries from the ValueTable. void ValueTable::clear() { valueNumbering.clear(); expressionNumbering.clear(); nextValueNumber = 1; Expressions.clear(); ExprIdx.clear(); nextExprNumber = 0; } /// Remove a value from the value numbering. void ValueTable::erase(Value *V) { valueNumbering.erase(V); } /// verifyRemoved - Verify that the value is removed from all internal data /// structures. void ValueTable::verifyRemoved(const Value *V) const { for (DenseMap<Value *, uint32_t>::const_iterator I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) { assert(I->first != V && "Inst still occurs in value numbering map!"); } } /// Return a pair the first field showing the value number of \p Exp and the /// second field showing whether it is a value number newly created. 
std::pair<uint32_t, bool> ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

} // namespace

namespace {

// Reduce code size for patterns like this:
//  if (a.x > 0) {
//    r = tex.Sample(ss, uv)-1;
//  } else {
//    if (a.y > 0)
//      r = tex.Sample(ss, uv);
//    else
//      r = tex.Sample(ss, uv) + 3;
//  }
class DxilSimpleGVNHoist : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit DxilSimpleGVNHoist() : FunctionPass(ID) {}

  StringRef getPassName() const override { return "DXIL simple GVN hoist"; }

  bool runOnFunction(Function &F) override;

private:
  bool tryToHoist(BasicBlock *BB, BasicBlock *Succ0, BasicBlock *Succ1);
};

char DxilSimpleGVNHoist::ID = 0;

bool HasOnePred(BasicBlock *BB) {
  if (pred_empty(BB))
    return false;

  auto pred = pred_begin(BB);
  pred++;
  if (pred != pred_end(BB))
    return false;
  return true;
}

bool DxilSimpleGVNHoist::tryToHoist(BasicBlock *BB, BasicBlock *Succ0,
                                    BasicBlock *Succ1) {
  // Value-number Succ0 and Succ1.
  ValueTable VT;
  DenseMap<uint32_t, SmallVector<Instruction *, 2>> VNtoInsts;
  for (Instruction &I : *Succ0) {
    uint32_t V = VT.lookupOrAdd(&I);
    VNtoInsts[V].emplace_back(&I);
  }

  std::vector<uint32_t> HoistCandidateVN;
  for (Instruction &I : *Succ1) {
    uint32_t V = VT.lookupOrAdd(&I);
    if (!VNtoInsts.count(V))
      continue;
    VNtoInsts[V].emplace_back(&I);
    HoistCandidateVN.emplace_back(V);
  }

  if (HoistCandidateVN.empty()) {
    return false;
  }

  DenseSet<uint32_t> ProcessedVN;
  Instruction *TI = BB->getTerminator();
  // Hoisting must happen in order, so that an operand can be hoisted before
  // its users.
  for (uint32_t VN : HoistCandidateVN) {
    // Skip already-processed VNs.
    if (ProcessedVN.count(VN))
      continue;
    ProcessedVN.insert(VN);
    auto &Insts = VNtoInsts[VN];
    if (Insts.size() == 1)
      continue;

    bool bHoist = false;
    for (Instruction *I : Insts) {
      if (I->getParent() == Succ1) {
        bHoist = true;
        break;
      }
    }

    Instruction *FirstI = Insts.front();
    if (bHoist) {
      // When the operands differ, the operands would need to be hoisted too.
      auto it = Insts.begin();
      it++;
      bool bHasDifferentOperand = false;
      unsigned NumOps = FirstI->getNumOperands();
      for (; it != Insts.end(); it++) {
        Instruction *I = *it;
        assert(NumOps == I->getNumOperands());
        for (unsigned i = 0; i < NumOps; i++) {
          if (FirstI->getOperand(i) != I->getOperand(i)) {
            bHasDifferentOperand = true;
            break;
          }
        }
        if (bHasDifferentOperand)
          break;
      }
      // TODO: hoist operands.
      if (bHasDifferentOperand)
        continue;

      // Move FirstI to BB.
      FirstI->removeFromParent();
      FirstI->insertBefore(TI);
    }

    // Replace all insts that have the same value number with FirstI.
    auto it = Insts.begin();
    it++;
    for (; it != Insts.end(); it++) {
      Instruction *I = *it;
      I->replaceAllUsesWith(FirstI);
      I->eraseFromParent();
    }
    Insts.clear();
  }

  return true;
}

bool DxilSimpleGVNHoist::runOnFunction(Function &F) {
  BasicBlock &Entry = F.getEntryBlock();
  bool bUpdated = false;
  for (auto it = po_begin(&Entry); it != po_end(&Entry); it++) {
    BasicBlock *BB = *it;
    TerminatorInst *TI = BB->getTerminator();
    if (TI->getNumSuccessors() != 2)
      continue;
    BasicBlock *Succ0 = TI->getSuccessor(0);
    BasicBlock *Succ1 = TI->getSuccessor(1);
    if (BB == Succ0)
      continue;
    if (BB == Succ1)
      continue;
    if (!HasOnePred(Succ0))
      continue;
    if (!HasOnePred(Succ1))
      continue;

    bUpdated |= tryToHoist(BB, Succ0, Succ1);
  }
  return bUpdated;
}

} // namespace

FunctionPass *llvm::createDxilSimpleGVNHoistPass() {
  return new DxilSimpleGVNHoist();
}

INITIALIZE_PASS(DxilSimpleGVNHoist, "dxil-gvn-hoist", "DXIL simple gvn hoist",
                false, false)

//================================================================================
//
// This pass tries to turn conditional branches into unconditional branches by
// proving the two sides of a branch are equivalent, using the ValueTable and
// the dominator trees.
//
// The algorithm:
//
// - Find any conditional branch 'Br' with successors 'S0' and 'S1', where
//   'Br' is their sole predecessor.
// - Find the common destination 'End' of the branches.
// - Find two predecessors 'P0' and 'P1' of 'End' such that 'S0' dominates
//   'P0' and 'P0' post-dominates 'S0', and 'P0' only has a single successor
//   (same with 'S1' and 'P1').
//   This means if 'Br'->'S0' is taken, then 'End' must be reached via 'P0'; if
//   'End' is reached via 'P0', 'Br'->'S0' must have been taken (same with 'S1'
//   and 'P1').
// - Using the ValueTable, check that every pair of incoming values from P0
//   and P1 is identical for every PHI in 'End'.
// - Make sure there is no side effect or loop between 'Br' and 'End'.
// - If all of the above checks succeed, replace 'Br' with an unconditional
//   branch to S0.
//
// The current state of the pass is pretty limited. If incoming values from P0
// and P1 are dependent on any PHIs defined between Br and End, then the pass
// will fail to simplify the branch. If there are any side effects within the
// region, the pass will also fail. It's possible to handle these cases, but
// that requires proving the two sides of the branch have equivalent control
// flow, which is non-trivial, and will be left to a later date.
// namespace { class DxilSimpleGVNEliminateRegion : public FunctionPass { bool RegionHasSideEffectsorLoops(BasicBlock *Begin, BasicBlock *End /*Non inclusive*/); std::unordered_map<BasicBlock *, bool> BlockHasSideEffects; bool MayHaveSideEffects(BasicBlock *BB) { auto It = BlockHasSideEffects.find(BB); if (It != BlockHasSideEffects.end()) return It->second; bool HasSideEffects = false; for (Instruction &I : *BB) { if (I.mayHaveSideEffects() && !hlsl::IsNop(&I)) { HasSideEffects = true; break; } } BlockHasSideEffects[BB] = HasSideEffects; return HasSideEffects; } bool ProcessBB(BasicBlock &BB, ValueTable &VT, DominatorTree *DT, PostDominatorTree *PDT); public: static char ID; // Pass identification, replacement for typeid explicit DxilSimpleGVNEliminateRegion() : FunctionPass(ID) {} StringRef getPassName() const override { return "DXIL simple GVN eliminate region"; } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<PostDominatorTree>(); AU.addRequired<DominatorTreeWrapperPass>(); } }; char DxilSimpleGVNEliminateRegion::ID = 0; bool DxilSimpleGVNEliminateRegion::RegionHasSideEffectsorLoops( BasicBlock *Begin, BasicBlock *End /*Non inclusive*/) { SmallVector<BasicBlock *, 10> Worklist; Worklist.push_back(Begin); SmallPtrSet<BasicBlock *, 10> Seen; while (Worklist.size()) { BasicBlock *BB = Worklist.pop_back_val(); // Stop before reaching End if (BB == End) continue; if (MayHaveSideEffects(BB)) return false; Seen.insert(BB); for (BasicBlock *Succ : successors(BB)) { // Goes back into the region. Give up. if (Seen.count(Succ)) return false; Worklist.push_back(Succ); } } return true; } bool DxilSimpleGVNEliminateRegion::ProcessBB(BasicBlock &BB, ValueTable &VT, DominatorTree *DT, PostDominatorTree *PDT) { TerminatorInst *TI = BB.getTerminator(); if (TI->getNumSuccessors() != 2) { return false; } BasicBlock *S0 = TI->getSuccessor(0); BasicBlock *S1 = TI->getSuccessor(1); if (&BB == S0) return false; if (&BB == S1) return false; if (!HasOnePred(S0)) return false; if (!HasOnePred(S1)) return false; BasicBlock *End = PDT->findNearestCommonDominator(S0, S1); // Don't handle this situation for now. if (!End || S0 == End || S1 == End) return false; // If there's no phi node at the beginning of End, then either there's // side effects in the body or this is not the right pass to handle it. 
if (!isa<PHINode>(End->front())) return false; BasicBlock *P0 = nullptr; BasicBlock *P1 = nullptr; PHINode *FirstPHI = cast<PHINode>(&End->front()); for (unsigned i = 0; i < FirstPHI->getNumIncomingValues(); i++) { BasicBlock *Incoming = FirstPHI->getIncomingBlock(i); if (!Incoming->getSingleSuccessor()) continue; if (DT->dominates(S0, Incoming) && PDT->dominates(Incoming, S0)) { P0 = Incoming; } if (DT->dominates(S1, Incoming) && PDT->dominates(Incoming, S1)) { P1 = Incoming; } } if (!P0 || !P1 || P0 == P1) return false; for (Instruction &I : *End) { PHINode *Phi = dyn_cast<PHINode>(&I); if (!Phi) break; Value *Incoming0 = Phi->getIncomingValueForBlock(P0); Value *Incoming1 = Phi->getIncomingValueForBlock(P1); if (VT.lookupOrAdd(Incoming0) != VT.lookupOrAdd(Incoming1)) { return false; } } if (!RegionHasSideEffectsorLoops(S0, End)) return false; if (!RegionHasSideEffectsorLoops(S1, End)) return false; BranchInst *Br = BranchInst::Create(S0, &BB); Br->setDebugLoc(TI->getDebugLoc()); TI->eraseFromParent(); return true; } bool DxilSimpleGVNEliminateRegion::runOnFunction(Function &F) { PostDominatorTree *PDT = &getAnalysis<PostDominatorTree>(); DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); bool bChanged = false; ValueTable VT; for (BasicBlock &BB : F) { bChanged |= ProcessBB(BB, VT, DT, PDT); } return bChanged; } } // namespace FunctionPass *llvm::createDxilSimpleGVNEliminateRegionPass() { return new DxilSimpleGVNEliminateRegion(); } INITIALIZE_PASS(DxilSimpleGVNEliminateRegion, "dxil-gvn-eliminate-region", "DXIL simple eliminate region", false, false)
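// ---------------------------------------------------------------------------
// Illustrative CFG (a hand-written IR sketch, not produced by this file) of
// the smallest diamond DxilSimpleGVNEliminateRegion can collapse. Here S0/P0
// is %then and S1/P1 is %else; both incoming values of the PHI get the same
// value number and neither arm has side effects, so the conditional branch in
// %entry is replaced with `br label %then`:
//
//   entry:
//     br i1 %cond, label %then, label %else
//   then:
//     %a = fadd float %x, %y
//     br label %end
//   else:
//     %b = fadd float %x, %y        ; same value number as %a
//     br label %end
//   end:
//     %r = phi float [ %a, %then ], [ %b, %else ]
// ---------------------------------------------------------------------------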
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/DxcOptimizer.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxcOptimizer.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides an IDxcOptimizer implementation. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/DxilContainer/DxilContainer.h" #include "dxc/DxilContainer/DxilContainerAssembler.h" #include "dxc/DxilContainer/DxilPipelineStateValidation.h" #include "dxc/DxilContainer/DxilRuntimeReflection.h" #include "dxc/HLSL/ComputeViewIdState.h" #include "dxc/HLSL/DxilGenerationPass.h" #include "dxc/HLSL/HLMatrixLowerPass.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/Unicode.h" #include "dxc/Support/WinIncludes.h" #include "dxc/Support/dxcapi.impl.h" #include "dxc/Support/microcom.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/ReducibilityAnalysis.h" #include "llvm/Analysis/CFGPrinter.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Verifier.h" #include "llvm/IRReader/IRReader.h" #include "llvm/Pass.h" #include "llvm/PassInfo.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Transforms/IPO/PassManagerBuilder.h" #include <algorithm> #include <list> // should change this for string_table #include <vector> #include "llvm/PassPrinters/PassPrinters.h" using namespace llvm; using namespace hlsl; inline static bool wcseq(LPCWSTR a, LPCWSTR b) { return 0 == wcscmp(a, b); } inline static bool wcsstartswith(LPCWSTR value, LPCWSTR prefix) { while (*value && *prefix && *value == *prefix) { ++value; ++prefix; } return *prefix == L'\0'; } #include "DxcOptimizer.inc" static void FatalErrorHandlerStreamWrite(void *user_data, const std::string &reason, bool gen_crash_diag) { raw_ostream *OS = (raw_ostream *)user_data; *OS << reason; throw std::exception(); } static HRESULT Utf8ToWideCoTaskMalloc(LPCSTR pValue, LPWSTR *ppResult) { if (ppResult == nullptr) return E_POINTER; int count = MultiByteToWideChar(CP_UTF8, 0, pValue, -1, nullptr, 0); *ppResult = (wchar_t *)CoTaskMemAlloc(sizeof(wchar_t) * count); if (*ppResult == nullptr) return E_OUTOFMEMORY; MultiByteToWideChar(CP_UTF8, 0, pValue, -1, *ppResult, count); return S_OK; } class DxcOptimizerPass : public IDxcOptimizerPass { private: DXC_MICROCOM_TM_REF_FIELDS() LPCSTR m_pOptionName; LPCSTR m_pDescription; ArrayRef<LPCSTR> m_pArgNames; ArrayRef<LPCSTR> m_pArgDescriptions; public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(DxcOptimizerPass) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IDxcOptimizerPass>(this, iid, ppvObject); } HRESULT Initialize(LPCSTR pOptionName, LPCSTR pDescription, ArrayRef<LPCSTR> pArgNames, ArrayRef<LPCSTR> pArgDescriptions) { DXASSERT(pArgNames.size() == pArgDescriptions.size(), "else lookup tables are out of alignment"); m_pOptionName = pOptionName; m_pDescription = pDescription; m_pArgNames = pArgNames; m_pArgDescriptions = pArgDescriptions; return S_OK; } static HRESULT Create(IMalloc *pMalloc, LPCSTR pOptionName, LPCSTR pDescription, ArrayRef<LPCSTR> pArgNames, ArrayRef<LPCSTR> 
pArgDescriptions, IDxcOptimizerPass **ppResult) { CComPtr<DxcOptimizerPass> result; *ppResult = nullptr; result = DxcOptimizerPass::Alloc(pMalloc); IFROOM(result); IFR(result->Initialize(pOptionName, pDescription, pArgNames, pArgDescriptions)); *ppResult = result.Detach(); return S_OK; } HRESULT STDMETHODCALLTYPE GetOptionName(LPWSTR *ppResult) override { return Utf8ToWideCoTaskMalloc(m_pOptionName, ppResult); } HRESULT STDMETHODCALLTYPE GetDescription(LPWSTR *ppResult) override { return Utf8ToWideCoTaskMalloc(m_pDescription, ppResult); } HRESULT STDMETHODCALLTYPE GetOptionArgCount(UINT32 *pCount) override { if (!pCount) return E_INVALIDARG; *pCount = m_pArgDescriptions.size(); return S_OK; } HRESULT STDMETHODCALLTYPE GetOptionArgName(UINT32 argIndex, LPWSTR *ppResult) override { if (!ppResult) return E_INVALIDARG; if (argIndex >= m_pArgNames.size()) return E_INVALIDARG; return Utf8ToWideCoTaskMalloc(m_pArgNames[argIndex], ppResult); } HRESULT STDMETHODCALLTYPE GetOptionArgDescription(UINT32 argIndex, LPWSTR *ppResult) override { if (!ppResult) return E_INVALIDARG; if (argIndex >= m_pArgDescriptions.size()) return E_INVALIDARG; return Utf8ToWideCoTaskMalloc(m_pArgDescriptions[argIndex], ppResult); } }; class DxcOptimizer : public IDxcOptimizer { private: DXC_MICROCOM_TM_REF_FIELDS() PassRegistry *m_registry; std::vector<const PassInfo *> m_passes; public: DXC_MICROCOM_TM_ADDREF_RELEASE_IMPL() DXC_MICROCOM_TM_CTOR(DxcOptimizer) HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject) override { return DoBasicQueryInterface<IDxcOptimizer>(this, iid, ppvObject); } HRESULT Initialize(); const PassInfo *getPassByID(llvm::AnalysisID PassID); const PassInfo *getPassByName(const char *pName); HRESULT STDMETHODCALLTYPE GetAvailablePassCount(UINT32 *pCount) override { return AssignToOut<UINT32>(m_passes.size(), pCount); } HRESULT STDMETHODCALLTYPE GetAvailablePass(UINT32 index, IDxcOptimizerPass **ppResult) override; HRESULT STDMETHODCALLTYPE RunOptimizer( IDxcBlob *pBlob, LPCWSTR *ppOptions, UINT32 optionCount, IDxcBlob **ppOutputModule, IDxcBlobEncoding **ppOutputText) override; }; class CapturePassManager : public llvm::legacy::PassManagerBase { private: SmallVector<Pass *, 64> Passes; public: ~CapturePassManager() { for (auto P : Passes) delete P; } void add(Pass *P) override { Passes.push_back(P); } size_t size() const { return Passes.size(); } StringRef getPassNameAt(size_t index) const { return Passes[index]->getPassName(); } llvm::AnalysisID getPassIDAt(size_t index) const { return Passes[index]->getPassID(); } }; HRESULT DxcOptimizer::Initialize() { try { m_registry = PassRegistry::getPassRegistry(); struct PRL : public PassRegistrationListener { std::vector<const PassInfo *> *Passes; void passEnumerate(const PassInfo *PI) override { DXASSERT(nullptr != PI->getNormalCtor(), "else cannot construct"); Passes->push_back(PI); } }; PRL prl; prl.Passes = &this->m_passes; m_registry->enumerateWith(&prl); } CATCH_CPP_RETURN_HRESULT(); return S_OK; } const PassInfo *DxcOptimizer::getPassByID(llvm::AnalysisID PassID) { return m_registry->getPassInfo(PassID); } const PassInfo *DxcOptimizer::getPassByName(const char *pName) { return m_registry->getPassInfo(StringRef(pName)); } HRESULT STDMETHODCALLTYPE DxcOptimizer::GetAvailablePass(UINT32 index, IDxcOptimizerPass **ppResult) { IFR(AssignToOut(nullptr, ppResult)); if (index >= m_passes.size()) return E_INVALIDARG; return DxcOptimizerPass::Create( m_pMalloc, m_passes[index]->getPassArgument(), m_passes[index]->getPassName().data(), 
GetPassArgNames(m_passes[index]->getPassArgument()), GetPassArgDescriptions(m_passes[index]->getPassArgument()), ppResult); } HRESULT STDMETHODCALLTYPE DxcOptimizer::RunOptimizer( IDxcBlob *pBlob, LPCWSTR *ppOptions, UINT32 optionCount, IDxcBlob **ppOutputModule, IDxcBlobEncoding **ppOutputText) { AssignToOutOpt(nullptr, ppOutputModule); AssignToOutOpt(nullptr, ppOutputText); if (pBlob == nullptr) return E_POINTER; if (optionCount > 0 && ppOptions == nullptr) return E_POINTER; DxcThreadMalloc TM(m_pMalloc); try { // Setup input buffer. // // The ir parsing requires the buffer to be null terminated. We deal with // both source and bitcode input, so the input buffer may not be null // terminated; we create a new membuf that copies and appends for this. // // If we have the beginning of a DXIL program header, skip to the bitcode. // LLVMContext Context; SMDiagnostic Err; std::unique_ptr<MemoryBuffer> memBuf; std::unique_ptr<Module> M; const char *pBlobContent = reinterpret_cast<const char *>(pBlob->GetBufferPointer()); unsigned blobSize = pBlob->GetBufferSize(); const DxilProgramHeader *pProgramHeader = reinterpret_cast<const DxilProgramHeader *>(pBlobContent); const DxilContainerHeader *pContainerHeader = IsDxilContainerLike(pBlobContent, blobSize); bool bIsFullContainer = IsValidDxilContainer(pContainerHeader, blobSize); if (bIsFullContainer) { // Prefer debug module, if present. pProgramHeader = GetDxilProgramHeader(pContainerHeader, DFCC_ShaderDebugInfoDXIL); if (!pProgramHeader) pProgramHeader = GetDxilProgramHeader(pContainerHeader, DFCC_DXIL); } if (IsValidDxilProgramHeader(pProgramHeader, blobSize)) { std::string DiagStr; GetDxilProgramBitcode(pProgramHeader, &pBlobContent, &blobSize); M = hlsl::dxilutil::LoadModuleFromBitcode( llvm::StringRef(pBlobContent, blobSize), Context, DiagStr); } else if (!bIsFullContainer) { StringRef bufStrRef(pBlobContent, blobSize); memBuf = MemoryBuffer::getMemBufferCopy(bufStrRef); M = parseIR(memBuf->getMemBufferRef(), Err, Context); } else { return DXC_E_CONTAINER_MISSING_DXIL; } if (M == nullptr) { return DXC_E_IR_VERIFICATION_FAILED; } if (bIsFullContainer) { // Restore extra data from certain parts back into the module so that data // isn't lost. Note: Only GetOrCreateDxilModule if one of these is // present. 
// - Subobjects from RDAT // - RootSignature from RTS0 // - ViewID and I/O dependency data from PSV0 // - Resource names and types/annotations from STAT // RDAT if (const DxilPartHeader *pPartHeader = GetDxilPartByType(pContainerHeader, DFCC_RuntimeData)) { DxilModule &DM = M->GetOrCreateDxilModule(); RDAT::DxilRuntimeData rdat(GetDxilPartData(pPartHeader), pPartHeader->PartSize); auto table = rdat.GetSubobjectTable(); if (table && table.Count() > 0) { DM.ResetSubobjects(new DxilSubobjects()); if (!LoadSubobjectsFromRDAT(*DM.GetSubobjects(), rdat)) { return DXC_E_CONTAINER_INVALID; } } } // RST0 if (const DxilPartHeader *pPartHeader = GetDxilPartByType(pContainerHeader, DFCC_RootSignature)) { DxilModule &DM = M->GetOrCreateDxilModule(); const uint8_t *pPartData = (const uint8_t *)GetDxilPartData(pPartHeader); std::vector<uint8_t> partData(pPartData, pPartData + pPartHeader->PartSize); DM.ResetSerializedRootSignature(partData); } // PSV0 if (const DxilPartHeader *pPartHeader = GetDxilPartByType( pContainerHeader, DFCC_PipelineStateValidation)) { DxilModule &DM = M->GetOrCreateDxilModule(); std::vector<unsigned int> &viewState = DM.GetSerializedViewIdState(); if (viewState.empty()) { DxilPipelineStateValidation PSV; PSV.InitFromPSV0(GetDxilPartData(pPartHeader), pPartHeader->PartSize); unsigned OutputSizeInUInts = hlsl::LoadViewIDStateFromPSV(nullptr, 0, PSV); if (OutputSizeInUInts) { viewState.assign(OutputSizeInUInts, 0); hlsl::LoadViewIDStateFromPSV(viewState.data(), (unsigned)viewState.size(), PSV); } } } // STAT if (const DxilPartHeader *pPartHeader = GetDxilPartByType(pContainerHeader, DFCC_ShaderStatistics)) { const DxilProgramHeader *pReflProgramHeader = reinterpret_cast<const DxilProgramHeader *>( GetDxilPartData(pPartHeader)); if (IsValidDxilProgramHeader(pReflProgramHeader, pPartHeader->PartSize)) { const char *pReflBitcode; uint32_t reflBitcodeLength; GetDxilProgramBitcode((const DxilProgramHeader *)pReflProgramHeader, &pReflBitcode, &reflBitcodeLength); std::string DiagStr; std::unique_ptr<Module> ReflM = hlsl::dxilutil::LoadModuleFromBitcode( llvm::StringRef(pReflBitcode, reflBitcodeLength), Context, DiagStr); if (ReflM) { // Restore resource names from reflection M->GetOrCreateDxilModule().RestoreResourceReflection( ReflM->GetOrCreateDxilModule()); } } } } legacy::PassManager ModulePasses; legacy::FunctionPassManager FunctionPasses(M.get()); legacy::PassManagerBase *pPassManager = &ModulePasses; CComPtr<AbstractMemoryStream> pOutputStream; CComPtr<IDxcBlob> pOutputBlob; IFT(CreateMemoryStream(m_pMalloc, &pOutputStream)); IFT(pOutputStream.QueryInterface(&pOutputBlob)); raw_stream_ostream outStream(pOutputStream.p); // // Consider some differences from opt.exe: // // Create a new optimization pass for each one specified on the command line // as in StandardLinkOpts, OptLevelO1, etc. // No target machine, and so no passes get their target machine ctor called. // No print-after-each-pass option. // No printing of the pass options. // No StripDebug support. // No verifyModule before starting. // Use of PassPipeline for new manager. // No TargetInfo. // No DataLayout. // bool OutputAssembly = false; bool AnalyzeOnly = false; // First gather flags, wherever they may be. 
    SmallVector<UINT32, 2> handled;
    for (UINT32 i = 0; i < optionCount; ++i) {
      if (wcseq(L"-S", ppOptions[i])) {
        OutputAssembly = true;
        handled.push_back(i);
        continue;
      }
      if (wcseq(L"-analyze", ppOptions[i])) {
        AnalyzeOnly = true;
        handled.push_back(i);
        continue;
      }
    }

    // TODO: should really use string_table for this once that's available
    std::list<std::string> optionsAnsi;
    SmallVector<PassOption, 2> options;
    for (UINT32 i = 0; i < optionCount; ++i) {
      if (std::find(handled.begin(), handled.end(), i) != handled.end()) {
        continue;
      }

      // Handle some special cases where we can inject a redirected output
      // stream.
      if (wcsstartswith(ppOptions[i], L"-print-module")) {
        LPCWSTR pName = ppOptions[i] + _countof(L"-print-module") - 1;
        std::string Banner;
        if (*pName) {
          // A trailing name, if present, must be separated by ':' or '='.
          IFTARG(*pName == L':' || *pName == L'=');
          ++pName;
          CW2A name8(pName);
          Banner = "MODULE-PRINT ";
          Banner += name8.m_psz;
          Banner += "\n";
        }
        if (pPassManager == &ModulePasses)
          pPassManager->add(llvm::createPrintModulePass(outStream, Banner));
        continue;
      }

      // Handle special switches to toggle per-function prepasses vs. module
      // passes.
      if (wcseq(ppOptions[i], L"-opt-fn-passes")) {
        pPassManager = &FunctionPasses;
        continue;
      }
      if (wcseq(ppOptions[i], L"-opt-mod-passes")) {
        pPassManager = &ModulePasses;
        continue;
      }

      CW2A optName(ppOptions[i]);
      // The option syntax is
      const char ArgDelim = ',';
      // '-' OPTION_NAME (',' ARG_NAME ('=' ARG_VALUE)?)*
      char *pCursor = optName.m_psz;
      const char *pEnd = optName.m_psz + strlen(optName.m_psz);
      if (*pCursor != '-' && *pCursor != '/') {
        return E_INVALIDARG;
      }
      ++pCursor;
      const char *pOptionNameStart = pCursor;
      while (*pCursor && *pCursor != ArgDelim) {
        ++pCursor;
      }
      *pCursor = '\0';
      const llvm::PassInfo *PassInf = getPassByName(pOptionNameStart);
      if (!PassInf) {
        return E_INVALIDARG;
      }
      while (pCursor < pEnd) {
        // *pCursor is '\0' when we overwrite ',' to get a null-terminated
        // string.
        if (*pCursor && *pCursor != ArgDelim) {
          return E_INVALIDARG;
        }
        ++pCursor;
        const char *pArgStart = pCursor;
        while (*pCursor && *pCursor != ArgDelim) {
          ++pCursor;
        }

        StringRef argString = StringRef(pArgStart, pCursor - pArgStart);
        std::pair<StringRef, StringRef> nameValue = argString.split('=');
        if (!IsPassOptionName(nameValue.first)) {
          return E_INVALIDARG;
        }

        PassOption *OptionPos = std::lower_bound(
            options.begin(), options.end(), nameValue, PassOptionsCompare());
        // If empty, remove if available; otherwise upsert.
if (nameValue.second.empty()) { if (OptionPos != options.end() && OptionPos->first == nameValue.first) { options.erase(OptionPos); } } else { if (OptionPos != options.end() && OptionPos->first == nameValue.first) { OptionPos->second = nameValue.second; } else { options.insert(OptionPos, nameValue); } } } DXASSERT(PassInf->getNormalCtor(), "else pass with no default .ctor was added"); Pass *pass = PassInf->getNormalCtor()(); pass->setOSOverride(&outStream); pass->applyOptions(options); options.clear(); pPassManager->add(pass); if (AnalyzeOnly) { const bool Quiet = false; PassKind Kind = pass->getPassKind(); switch (Kind) { case PT_BasicBlock: pPassManager->add( createBasicBlockPassPrinter(PassInf, outStream, Quiet)); break; case PT_Region: pPassManager->add(createRegionPassPrinter(PassInf, outStream, Quiet)); break; case PT_Loop: pPassManager->add(createLoopPassPrinter(PassInf, outStream, Quiet)); break; case PT_Function: pPassManager->add( createFunctionPassPrinter(PassInf, outStream, Quiet)); break; case PT_CallGraphSCC: pPassManager->add( createCallGraphPassPrinter(PassInf, outStream, Quiet)); break; default: pPassManager->add(createModulePassPrinter(PassInf, outStream, Quiet)); break; } } } ModulePasses.add(createVerifierPass()); if (OutputAssembly) { ModulePasses.add(llvm::createPrintModulePass(outStream)); } // Now that we have all of the passes ready, run them. { raw_ostream *err_ostream = &outStream; ScopedFatalErrorHandler errHandler(FatalErrorHandlerStreamWrite, err_ostream); FunctionPasses.doInitialization(); for (Function &F : *M.get()) if (!F.isDeclaration()) FunctionPasses.run(F); FunctionPasses.doFinalization(); ModulePasses.run(*M.get()); } outStream.flush(); if (ppOutputText != nullptr) { IFT(DxcCreateBlobWithEncodingSet(pOutputBlob, CP_UTF8, ppOutputText)); } if (ppOutputModule != nullptr) { CComPtr<AbstractMemoryStream> pProgramStream; IFT(CreateMemoryStream(m_pMalloc, &pProgramStream)); { raw_stream_ostream outStream(pProgramStream.p); WriteBitcodeToFile(M.get(), outStream, true); } IFT(pProgramStream.QueryInterface(ppOutputModule)); } } CATCH_CPP_RETURN_HRESULT(); return S_OK; } HRESULT CreateDxcOptimizer(REFIID riid, LPVOID *ppv) { CComPtr<DxcOptimizer> result = DxcOptimizer::Alloc(DxcGetThreadMallocNoRef()); if (result == nullptr) { *ppv = nullptr; return E_OUTOFMEMORY; } IFR(result->Initialize()); return result.p->QueryInterface(riid, ppv); }
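// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): driving the
// optimizer through its COM interface. `pInputBlob` is assumed to hold a DXIL
// container or textual IL; the helper name and the pass selection are
// hypothetical. Option strings follow the syntax parsed in RunOptimizer:
// '-' OPTION_NAME (',' ARG_NAME ('=' ARG_VALUE)?)*.
// ---------------------------------------------------------------------------
static HRESULT runOptimizerPasses(IDxcBlob *pInputBlob, IDxcBlob **ppModule,
                                  IDxcBlobEncoding **ppText) {
  CComPtr<IDxcOptimizer> pOptimizer;
  IFR(CreateDxcOptimizer(IID_PPV_ARGS(&pOptimizer)));
  // -S also emits the final module as textual IL into *ppText.
  LPCWSTR options[] = {L"-S", L"-hl-preprocess"};
  return pOptimizer->RunOptimizer(pInputBlob, options, _countof(options),
                                  ppModule, ppText);
}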
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLPreprocess.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// HLPreprocess.cpp                                                          //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source    //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Preprocess HLModule after inline.                                         //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

#include "dxc/HLSL/DxilGenerationPass.h"

using namespace llvm;

///////////////////////////////////////////////////////////////////////////////
// HLPreprocess.
// The inliner will create stacksave/stackrestore pairs if there are allocas
// in blocks other than the entry block. HLPreprocess removes the stacksave
// and stackrestore calls and puts all allocas in the entry block.
//
namespace {

class HLPreprocess : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  explicit HLPreprocess() : ModulePass(ID) {}

  StringRef getPassName() const override {
    return "Preprocess HLModule after inline";
  }

  bool runOnModule(Module &M) override {
    bool bUpdated = false;
    // Remove stacksave and stackrestore.
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
    // If they have users, remove the users first.
    if (!StackSave->user_empty() || !StackRestore->user_empty()) {
      for (auto it = StackRestore->user_begin();
           it != StackRestore->user_end();) {
        Instruction *I = cast<Instruction>(*(it++));
        I->eraseFromParent();
      }
      for (auto it = StackSave->user_begin(); it != StackSave->user_end();) {
        Instruction *I = cast<Instruction>(*(it++));
        I->eraseFromParent();
      }
      bUpdated = true;
    }
    StackSave->eraseFromParent();
    StackRestore->eraseFromParent();

    // If stacksave/stackrestore is present, it means some allocas are not in
    // the entry block. However, there could be other cases where allocas
    // are present in non-entry blocks.
    // Therefore, always go through all non-entry blocks and
    // make sure all allocas are moved to the entry block.
    for (Function &F : M.functions()) {
      bUpdated |= MoveAllocasToEntryBlock(&F);
    }

    return bUpdated;
  }

private:
  bool MoveAllocasToEntryBlock(Function *F);
};

char HLPreprocess::ID = 0;

// Make sure all allocas are in the entry block.
bool HLPreprocess::MoveAllocasToEntryBlock(Function *F) {
  bool changed = false;
  if (F->getBasicBlockList().size() < 2)
    return changed;

  BasicBlock &Entry = F->getEntryBlock();
  IRBuilder<> Builder(Entry.getFirstInsertionPt());

  for (auto bb = F->begin(); bb != F->end(); bb++) {
    BasicBlock *BB = bb;
    if (BB == &Entry)
      continue;

    for (auto it = BB->begin(); it != BB->end();) {
      Instruction *I = (it++);
      if (isa<AllocaInst>(I)) {
        I->removeFromParent();
        Builder.Insert(I);
        changed = true;
      }
    }
  }
  return changed;
}

} // namespace

ModulePass *llvm::createHLPreprocessPass() { return new HLPreprocess(); }

INITIALIZE_PASS(HLPreprocess, "hl-preprocess",
                "Preprocess HLModule after inline", false, false)
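// ---------------------------------------------------------------------------
// Illustrative before/after (a hand-written IR sketch, not produced by this
// file) of the rewrite HLPreprocess performs after inlining:
//
//   before:
//     entry:
//       br label %bb
//     bb:
//       %ss = call i8* @llvm.stacksave()
//       %a = alloca float
//       store float %v, float* %a
//       call void @llvm.stackrestore(i8* %ss)
//       ...
//
//   after:
//     entry:
//       %a = alloca float
//       br label %bb
//     bb:
//       store float %v, float* %a
//       ...
// ---------------------------------------------------------------------------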
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/HLSL/HLUtil.cpp
/////////////////////////////////////////////////////////////////////////////// // // // HLUtil.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // HL helper functions. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/HLSL/HLUtil.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/Support/Global.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Operator.h" using namespace llvm; using namespace hlsl; using namespace hlsl::hlutil; namespace { void analyzePointer(const Value *V, PointerStatus &PS, DxilTypeSystem &typeSys, bool bStructElt, bool bLdStOnly) { // Early return when only care load store. if (bLdStOnly) { if (PS.HasLoaded() && PS.HasStored()) return; } for (const User *U : V->users()) { if (const Instruction *I = dyn_cast<Instruction>(U)) { const Function *F = I->getParent()->getParent(); if (!PS.AccessingFunction) { PS.AccessingFunction = F; } else { if (F != PS.AccessingFunction) PS.HasMultipleAccessingFunctions = true; } } if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(U)) { analyzePointer(BC, PS, typeSys, bStructElt, bLdStOnly); } else if (const MemCpyInst *MC = dyn_cast<MemCpyInst>(U)) { // Do not collect memcpy on struct GEP use. // These memcpy will be flattened in next level. if (!bStructElt) { MemCpyInst *MI = const_cast<MemCpyInst *>(MC); PS.memcpySet.insert(MI); bool bFullCopy = false; if (ConstantInt *Length = dyn_cast<ConstantInt>(MC->getLength())) { bFullCopy = PS.Size == Length->getLimitedValue() || PS.Size == 0 || Length->getLimitedValue() == 0; // handle unbounded arrays } if (MC->getRawDest() == V) { if (bFullCopy && PS.storedType == PointerStatus::StoredType::NotStored) { PS.storedType = PointerStatus::StoredType::MemcopyDestOnce; PS.StoringMemcpy = MI; } else { PS.MarkAsStored(); PS.StoringMemcpy = nullptr; } } else if (MC->getRawSource() == V) { if (bFullCopy && PS.loadedType == PointerStatus::LoadedType::NotLoaded) { PS.loadedType = PointerStatus::LoadedType::MemcopySrcOnce; PS.LoadingMemcpy = MI; } else { PS.MarkAsLoaded(); PS.LoadingMemcpy = nullptr; } } } else { if (MC->getRawDest() == V) { PS.MarkAsStored(); } else { DXASSERT(MC->getRawSource() == V, "must be source here"); PS.MarkAsLoaded(); } } } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { gep_type_iterator GEPIt = gep_type_begin(GEP); gep_type_iterator GEPEnd = gep_type_end(GEP); // Skip pointer idx. GEPIt++; // Struct elt will be flattened in next level. 
bool bStructElt = (GEPIt != GEPEnd) && GEPIt->isStructTy(); analyzePointer(GEP, PS, typeSys, bStructElt, bLdStOnly); } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { Value *V = SI->getOperand(0); if (PS.storedType == PointerStatus::StoredType::NotStored) { PS.storedType = PointerStatus::StoredType::StoredOnce; PS.StoredOnceValue = V; } else { PS.MarkAsStored(); } } else if (dyn_cast<LoadInst>(U)) { PS.MarkAsLoaded(); } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { Function *F = CI->getCalledFunction(); if (F->isIntrinsic()) { if (F->getIntrinsicID() == Intrinsic::lifetime_start || F->getIntrinsicID() == Intrinsic::lifetime_end) continue; } DxilFunctionAnnotation *annotation = typeSys.GetFunctionAnnotation(F); if (!annotation) { HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(F); switch (group) { case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLMatLoadStoreOpcode::ColMatLoad: case HLMatLoadStoreOpcode::RowMatLoad: PS.MarkAsLoaded(); break; case HLMatLoadStoreOpcode::ColMatStore: case HLMatLoadStoreOpcode::RowMatStore: PS.MarkAsStored(); break; default: DXASSERT(0, "invalid opcode"); PS.MarkAsStored(); PS.MarkAsLoaded(); } } break; case HLOpcodeGroup::HLSubscript: { HLSubscriptOpcode opcode = static_cast<HLSubscriptOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLSubscriptOpcode::VectorSubscript: case HLSubscriptOpcode::ColMatElement: case HLSubscriptOpcode::ColMatSubscript: case HLSubscriptOpcode::RowMatElement: case HLSubscriptOpcode::RowMatSubscript: analyzePointer(CI, PS, typeSys, bStructElt, bLdStOnly); break; default: // Rest are resource ptr like buf[i]. // Only read of resource handle. PS.MarkAsLoaded(); break; } } break; default: { // If not sure its out param or not. Take as out param. PS.MarkAsStored(); PS.MarkAsLoaded(); } } continue; } unsigned argSize = F->arg_size(); for (unsigned i = 0; i < argSize; i++) { Value *arg = CI->getArgOperand(i); if (V == arg) { if (bLdStOnly) { auto &paramAnnot = annotation->GetParameterAnnotation(i); switch (paramAnnot.GetParamInputQual()) { default: PS.MarkAsStored(); PS.MarkAsLoaded(); break; case DxilParamInputQual::Out: PS.MarkAsStored(); break; case DxilParamInputQual::In: PS.MarkAsLoaded(); break; } } else { // Do not replace struct arg. // Mark stored and loaded to disable replace. PS.MarkAsStored(); PS.MarkAsLoaded(); } } } } } } } // namespace namespace hlsl { namespace hlutil { void PointerStatus::analyze(DxilTypeSystem &typeSys, bool bStructElt) { analyzePointer(Ptr, *this, typeSys, bStructElt, bLoadStoreOnly); } PointerStatus::PointerStatus(llvm::Value *ptr, unsigned size, bool bLdStOnly) : storedType(StoredType::NotStored), loadedType(LoadedType::NotLoaded), StoredOnceValue(nullptr), StoringMemcpy(nullptr), LoadingMemcpy(nullptr), AccessingFunction(nullptr), HasMultipleAccessingFunctions(false), Size(size), Ptr(ptr), bLoadStoreOnly(bLdStOnly) {} void PointerStatus::MarkAsStored() { storedType = StoredType::Stored; StoredOnceValue = nullptr; } void PointerStatus::MarkAsLoaded() { loadedType = LoadedType::Loaded; } bool PointerStatus::HasStored() { return storedType != StoredType::NotStored && storedType != StoredType::InitializerStored; } bool PointerStatus::HasLoaded() { return loadedType != LoadedType::NotLoaded; } } // namespace hlutil } // namespace hlsl
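// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): classifying how
// a pointer is accessed with the PointerStatus API defined above. The helper
// name and the caller-provided size are hypothetical.
// ---------------------------------------------------------------------------
static bool isWriteOnlyPointer(llvm::Value *Ptr, unsigned SizeInBytes,
                               hlsl::DxilTypeSystem &TypeSys) {
  // Track loads/stores only; memcpy bookkeeping is not needed for this query.
  hlsl::hlutil::PointerStatus PS(Ptr, SizeInBytes, /*bLdStOnly*/ true);
  PS.analyze(TypeSys, /*bStructElt*/ false);
  return PS.HasStored() && !PS.HasLoaded();
}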
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/SampleProfWriter.cpp
//===- SampleProfWriter.cpp - Write LLVM sample profile data --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the class that writes LLVM sample profiles. It // supports two file formats: text and binary. The textual representation // is useful for debugging and testing purposes. The binary representation // is more compact, resulting in smaller file sizes. However, they can // both be used interchangeably. // // See lib/ProfileData/SampleProfReader.cpp for documentation on each of the // supported formats. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/SampleProfWriter.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorOr.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/LineIterator.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Regex.h" using namespace llvm::sampleprof; using namespace llvm; /// \brief Write samples to a text file. bool SampleProfileWriterText::write(StringRef FName, const FunctionSamples &S) { if (S.empty()) return true; OS << FName << ":" << S.getTotalSamples() << ":" << S.getHeadSamples() << "\n"; for (const auto &I : S.getBodySamples()) { LineLocation Loc = I.first; const SampleRecord &Sample = I.second; if (Loc.Discriminator == 0) OS << Loc.LineOffset << ": "; else OS << Loc.LineOffset << "." << Loc.Discriminator << ": "; OS << Sample.getSamples(); for (const auto &J : Sample.getCallTargets()) OS << " " << J.first() << ":" << J.second; OS << "\n"; } return true; } SampleProfileWriterBinary::SampleProfileWriterBinary(StringRef F, std::error_code &EC) : SampleProfileWriter(F, EC, sys::fs::F_None) { if (EC) return; // Write the file header. encodeULEB128(SPMagic(), OS); encodeULEB128(SPVersion(), OS); } /// \brief Write samples to a binary file. /// /// \returns true if the samples were written successfully, false otherwise. bool SampleProfileWriterBinary::write(StringRef FName, const FunctionSamples &S) { if (S.empty()) return true; OS << FName; encodeULEB128(0, OS); encodeULEB128(S.getTotalSamples(), OS); encodeULEB128(S.getHeadSamples(), OS); encodeULEB128(S.getBodySamples().size(), OS); for (const auto &I : S.getBodySamples()) { LineLocation Loc = I.first; const SampleRecord &Sample = I.second; encodeULEB128(Loc.LineOffset, OS); encodeULEB128(Loc.Discriminator, OS); encodeULEB128(Sample.getSamples(), OS); encodeULEB128(Sample.getCallTargets().size(), OS); for (const auto &J : Sample.getCallTargets()) { std::string Callee = J.first(); unsigned CalleeSamples = J.second; OS << Callee; encodeULEB128(0, OS); encodeULEB128(CalleeSamples, OS); } } return true; } /// \brief Create a sample profile writer based on the specified format. /// /// \param Filename The file to create. /// /// \param Writer The writer to instantiate according to the specified format. /// /// \param Format Encoding format for the profile file. /// /// \returns an error code indicating the status of the created writer. 
ErrorOr<std::unique_ptr<SampleProfileWriter>> SampleProfileWriter::create(StringRef Filename, SampleProfileFormat Format) { std::error_code EC; std::unique_ptr<SampleProfileWriter> Writer; if (Format == SPF_Binary) Writer.reset(new SampleProfileWriterBinary(Filename, EC)); else if (Format == SPF_Text) Writer.reset(new SampleProfileWriterText(Filename, EC)); else EC = sampleprof_error::unrecognized_format; if (EC) return EC; return std::move(Writer); }
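// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): create a text
// writer through the factory above and emit one function's samples. With
// made-up values, the textual output of write() has the shape
//   main:18400:4
//   1: 200
//   2.1: 1200 _Z3foov:600
// i.e. a Name:TotalSamples:HeadSamples head line, then one
// LineOffset[.Discriminator]: Samples [CallTarget:Count]* line per body
// sample.
// ---------------------------------------------------------------------------
static std::error_code writeTextProfile(StringRef Path, StringRef FName,
                                        const FunctionSamples &S) {
  auto WriterOrErr = SampleProfileWriter::create(Path, SPF_Text);
  if (std::error_code EC = WriterOrErr.getError())
    return EC;
  (*WriterOrErr)->write(FName, S); // reports success as true
  return std::error_code();
}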
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/CoverageMappingWriter.cpp
//=-- CoverageMappingWriter.cpp - Code coverage mapping writer -------------=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing coverage mapping data for // instrumentation based coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/CoverageMappingWriter.h" #include "llvm/Support/LEB128.h" using namespace llvm; using namespace coverage; void CoverageFilenamesSectionWriter::write(raw_ostream &OS) { encodeULEB128(Filenames.size(), OS); for (const auto &Filename : Filenames) { encodeULEB128(Filename.size(), OS); OS << Filename; } } namespace { /// \brief Gather only the expressions that are used by the mapping /// regions in this function. class CounterExpressionsMinimizer { ArrayRef<CounterExpression> Expressions; llvm::SmallVector<CounterExpression, 16> UsedExpressions; std::vector<unsigned> AdjustedExpressionIDs; public: void mark(Counter C) { if (!C.isExpression()) return; unsigned ID = C.getExpressionID(); AdjustedExpressionIDs[ID] = 1; mark(Expressions[ID].LHS); mark(Expressions[ID].RHS); } void gatherUsed(Counter C) { if (!C.isExpression() || !AdjustedExpressionIDs[C.getExpressionID()]) return; AdjustedExpressionIDs[C.getExpressionID()] = UsedExpressions.size(); const auto &E = Expressions[C.getExpressionID()]; UsedExpressions.push_back(E); gatherUsed(E.LHS); gatherUsed(E.RHS); } CounterExpressionsMinimizer(ArrayRef<CounterExpression> Expressions, ArrayRef<CounterMappingRegion> MappingRegions) : Expressions(Expressions) { AdjustedExpressionIDs.resize(Expressions.size(), 0); for (const auto &I : MappingRegions) mark(I.Count); for (const auto &I : MappingRegions) gatherUsed(I.Count); } ArrayRef<CounterExpression> getExpressions() const { return UsedExpressions; } /// \brief Adjust the given counter to correctly transition from the old /// expression ids to the new expression ids. Counter adjust(Counter C) const { if (C.isExpression()) C = Counter::getExpression(AdjustedExpressionIDs[C.getExpressionID()]); return C; } }; } /// \brief Encode the counter. /// /// The encoding uses the following format: /// Low 2 bits - Tag: /// Counter::Zero(0) - A Counter with kind Counter::Zero /// Counter::CounterValueReference(1) - A counter with kind /// Counter::CounterValueReference /// Counter::Expression(2) + CounterExpression::Subtract(0) - /// A counter with kind Counter::Expression and an expression /// with kind CounterExpression::Subtract /// Counter::Expression(2) + CounterExpression::Add(1) - /// A counter with kind Counter::Expression and an expression /// with kind CounterExpression::Add /// Remaining bits - Counter/Expression ID. static unsigned encodeCounter(ArrayRef<CounterExpression> Expressions, Counter C) { unsigned Tag = unsigned(C.getKind()); if (C.isExpression()) Tag += Expressions[C.getExpressionID()].Kind; unsigned ID = C.getCounterID(); assert(ID <= (std::numeric_limits<unsigned>::max() >> Counter::EncodingTagBits)); return Tag | (ID << Counter::EncodingTagBits); } static void writeCounter(ArrayRef<CounterExpression> Expressions, Counter C, raw_ostream &OS) { encodeULEB128(encodeCounter(Expressions, C), OS); } void CoverageMappingWriter::write(raw_ostream &OS) { // Sort the regions in an ascending order by the file id and the starting // location. 
std::stable_sort(MappingRegions.begin(), MappingRegions.end()); // Write out the fileid -> filename mapping. encodeULEB128(VirtualFileMapping.size(), OS); for (const auto &FileID : VirtualFileMapping) encodeULEB128(FileID, OS); // Write out the expressions. CounterExpressionsMinimizer Minimizer(Expressions, MappingRegions); auto MinExpressions = Minimizer.getExpressions(); encodeULEB128(MinExpressions.size(), OS); for (const auto &E : MinExpressions) { writeCounter(MinExpressions, Minimizer.adjust(E.LHS), OS); writeCounter(MinExpressions, Minimizer.adjust(E.RHS), OS); } // Write out the mapping regions. // Split the regions into subarrays where each region in a // subarray has a fileID which is the index of that subarray. unsigned PrevLineStart = 0; unsigned CurrentFileID = ~0U; for (auto I = MappingRegions.begin(), E = MappingRegions.end(); I != E; ++I) { if (I->FileID != CurrentFileID) { // Ensure that all file ids have at least one mapping region. assert(I->FileID == (CurrentFileID + 1)); // Find the number of regions with this file id. unsigned RegionCount = 1; for (auto J = I + 1; J != E && I->FileID == J->FileID; ++J) ++RegionCount; // Start a new region sub-array. encodeULEB128(RegionCount, OS); CurrentFileID = I->FileID; PrevLineStart = 0; } Counter Count = Minimizer.adjust(I->Count); switch (I->Kind) { case CounterMappingRegion::CodeRegion: writeCounter(MinExpressions, Count, OS); break; case CounterMappingRegion::ExpansionRegion: { assert(Count.isZero()); assert(I->ExpandedFileID <= (std::numeric_limits<unsigned>::max() >> Counter::EncodingCounterTagAndExpansionRegionTagBits)); // Mark an expansion region with a set bit that follows the counter tag, // and pack the expanded file id into the remaining bits. unsigned EncodedTagExpandedFileID = (1 << Counter::EncodingTagBits) | (I->ExpandedFileID << Counter::EncodingCounterTagAndExpansionRegionTagBits); encodeULEB128(EncodedTagExpandedFileID, OS); break; } case CounterMappingRegion::SkippedRegion: assert(Count.isZero()); encodeULEB128(uint64_t(I->Kind) << Counter::EncodingCounterTagAndExpansionRegionTagBits, OS); break; } assert(I->LineStart >= PrevLineStart); encodeULEB128(I->LineStart - PrevLineStart, OS); encodeULEB128(I->ColumnStart, OS); assert(I->LineEnd >= I->LineStart); encodeULEB128(I->LineEnd - I->LineStart, OS); encodeULEB128(I->ColumnEnd, OS); PrevLineStart = I->LineStart; } // Ensure that all file ids have at least one mapping region. assert(CurrentFileID == (VirtualFileMapping.size() - 1)); }
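// ---------------------------------------------------------------------------
// Worked example (hand-computed, values hypothetical) of the counter encoding
// documented above encodeCounter: a counter of kind CounterValueReference
// (tag 1) with ID 5 encodes as (5 << Counter::EncodingTagBits) | 1, i.e.
// (5 << 2) | 1 = 21, which encodeULEB128 writes as the single byte 0x15.
// ---------------------------------------------------------------------------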
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/InstrProfWriter.cpp
//=-- InstrProfWriter.cpp - Instrumented profiling writer -------------------=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for writing profiling data for clang's // instrumentation based PGO and coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/InstrProfWriter.h" #include "InstrProfIndexed.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/EndianStream.h" #include "llvm/Support/OnDiskHashTable.h" using namespace llvm; namespace { class InstrProfRecordTrait { public: typedef StringRef key_type; typedef StringRef key_type_ref; typedef const InstrProfWriter::CounterData *const data_type; typedef const InstrProfWriter::CounterData *const data_type_ref; typedef uint64_t hash_value_type; typedef uint64_t offset_type; static hash_value_type ComputeHash(key_type_ref K) { return IndexedInstrProf::ComputeHash(IndexedInstrProf::HashType, K); } static std::pair<offset_type, offset_type> EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) { using namespace llvm::support; endian::Writer<little> LE(Out); offset_type N = K.size(); LE.write<offset_type>(N); offset_type M = 0; for (const auto &Counts : *V) M += (2 + Counts.second.size()) * sizeof(uint64_t); LE.write<offset_type>(M); return std::make_pair(N, M); } static void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N){ Out.write(K.data(), N); } static void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) { using namespace llvm::support; endian::Writer<little> LE(Out); for (const auto &Counts : *V) { LE.write<uint64_t>(Counts.first); LE.write<uint64_t>(Counts.second.size()); for (uint64_t I : Counts.second) LE.write<uint64_t>(I); } } }; } std::error_code InstrProfWriter::addFunctionCounts(StringRef FunctionName, uint64_t FunctionHash, ArrayRef<uint64_t> Counters) { auto &CounterData = FunctionData[FunctionName]; auto Where = CounterData.find(FunctionHash); if (Where == CounterData.end()) { // We've never seen a function with this name and hash, add it. CounterData[FunctionHash] = Counters; // We keep track of the max function count as we go for simplicity. if (Counters[0] > MaxFunctionCount) MaxFunctionCount = Counters[0]; return instrprof_error::success; } // We're updating a function we've seen before. auto &FoundCounters = Where->second; // If the number of counters doesn't match we either have bad data or a hash // collision. if (FoundCounters.size() != Counters.size()) return instrprof_error::count_mismatch; for (size_t I = 0, E = Counters.size(); I < E; ++I) { if (FoundCounters[I] + Counters[I] < FoundCounters[I]) return instrprof_error::counter_overflow; FoundCounters[I] += Counters[I]; } // We keep track of the max function count as we go for simplicity. if (FoundCounters[0] > MaxFunctionCount) MaxFunctionCount = FoundCounters[0]; return instrprof_error::success; } std::pair<uint64_t, uint64_t> InstrProfWriter::writeImpl(raw_ostream &OS) { OnDiskChainedHashTableGenerator<InstrProfRecordTrait> Generator; // Populate the hash table generator. for (const auto &I : FunctionData) Generator.insert(I.getKey(), &I.getValue()); using namespace llvm::support; endian::Writer<little> LE(OS); // Write the header. 
LE.write<uint64_t>(IndexedInstrProf::Magic); LE.write<uint64_t>(IndexedInstrProf::Version); LE.write<uint64_t>(MaxFunctionCount); LE.write<uint64_t>(static_cast<uint64_t>(IndexedInstrProf::HashType)); // Save a space to write the hash table start location. uint64_t HashTableStartLoc = OS.tell(); LE.write<uint64_t>(0); // Write the hash table. uint64_t HashTableStart = Generator.Emit(OS); return std::make_pair(HashTableStartLoc, HashTableStart); } void InstrProfWriter::write(raw_fd_ostream &OS) { // Write the hash table. auto TableStart = writeImpl(OS); // Go back and fill in the hash table start. using namespace support; OS.seek(TableStart.first); endian::Writer<little>(OS).write<uint64_t>(TableStart.second); } std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() { std::string Data; llvm::raw_string_ostream OS(Data); // Write the hash table. auto TableStart = writeImpl(OS); OS.flush(); // Go back and fill in the hash table start. using namespace support; uint64_t Bytes = endian::byte_swap<uint64_t, little>(TableStart.second); Data.replace(TableStart.first, sizeof(uint64_t), (const char *)&Bytes, sizeof(uint64_t)); // Return this in an aligned memory buffer. return MemoryBuffer::getMemBufferCopy(Data); }
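For orientation, the header that writeImpl above emits has the following fixed layout (a summary added here, not text from the original file; all fields are little-endian uint64_t, and the constants come from InstrProfIndexed.h elsewhere in this directory):

//  +0   IndexedInstrProf::Magic
//  +8   IndexedInstrProf::Version      (2 in this tree)
//  +16  MaxFunctionCount
//  +24  IndexedInstrProf::HashType     (HashT::MD5, i.e. 0)
//  +32  offset of the on-disk hash table, written as 0 first and patched
//       afterwards by write()/writeBuffer() once Generator.Emit() returns
//  +40  OnDiskChainedHashTable payload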
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/CMakeLists.txt
add_llvm_library(LLVMProfileData InstrProf.cpp InstrProfReader.cpp InstrProfWriter.cpp CoverageMapping.cpp CoverageMappingWriter.cpp CoverageMappingReader.cpp SampleProf.cpp SampleProfReader.cpp SampleProfWriter.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/ProfileData DEPENDS intrinsics_gen )
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/LLVMBuild.txt
;===- ./lib/ProfileData/LLVMBuild.txt --------------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = ProfileData parent = Libraries required_libraries = Core Support # HLSL Change: remove Object
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/InstrProfIndexed.h
//=-- InstrProfIndexed.h - Indexed profiling format support -------*- C++ -*-=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Shared header for the instrumented profile data reader and writer. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_PROFILEDATA_INSTRPROFINDEXED_H #define LLVM_LIB_PROFILEDATA_INSTRPROFINDEXED_H #include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MD5.h" namespace llvm { namespace IndexedInstrProf { enum class HashT : uint32_t { MD5, Last = MD5 }; static inline uint64_t MD5Hash(StringRef Str) { MD5 Hash; Hash.update(Str); llvm::MD5::MD5Result Result; Hash.final(Result); // Return the least significant 8 bytes. Our MD5 implementation returns the // result in little endian, so we may need to swap bytes. using namespace llvm::support; return endian::read<uint64_t, little, unaligned>(Result); } static inline uint64_t ComputeHash(HashT Type, StringRef K) { switch (Type) { case HashT::MD5: return IndexedInstrProf::MD5Hash(K); } llvm_unreachable("Unhandled hash type"); } const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81" const uint64_t Version = 2; const HashT HashType = HashT::MD5; } } // end namespace llvm #endif
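A byte-level note on the Magic constant above (an annotation, not part of the original header): since the indexed format is written little-endian, the least significant byte comes first on disk, which is what the "\xfflprofi\x81" comment spells out.

// 0x8169666f72706cff, least significant byte first:
//   ff 6c 70 72 6f 66 69 81   ==   '\xff' 'l' 'p' 'r' 'o' 'f' 'i' '\x81'
// IndexedInstrProfReader::hasFormat() reads the first eight bytes back with
// endian::read<uint64_t, little, aligned> and compares them against Magic.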
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/InstrProfReader.cpp
//=-- InstrProfReader.cpp - Instrumented profiling reader -------------------=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for reading profiling data for clang's // instrumentation based PGO and coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/InstrProfReader.h" #include "InstrProfIndexed.h" #include "llvm/ADT/STLExtras.h" #include <cassert> using namespace llvm; static ErrorOr<std::unique_ptr<MemoryBuffer>> setupMemoryBuffer(std::string Path) { ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = MemoryBuffer::getFileOrSTDIN(Path); if (std::error_code EC = BufferOrErr.getError()) return EC; return std::move(BufferOrErr.get()); } static std::error_code initializeReader(InstrProfReader &Reader) { return Reader.readHeader(); } ErrorOr<std::unique_ptr<InstrProfReader>> InstrProfReader::create(std::string Path) { // Set up the buffer to read. auto BufferOrError = setupMemoryBuffer(Path); if (std::error_code EC = BufferOrError.getError()) return EC; return InstrProfReader::create(std::move(BufferOrError.get())); } ErrorOr<std::unique_ptr<InstrProfReader>> InstrProfReader::create(std::unique_ptr<MemoryBuffer> Buffer) { // Sanity check the buffer. if (Buffer->getBufferSize() > std::numeric_limits<unsigned>::max()) return instrprof_error::too_large; std::unique_ptr<InstrProfReader> Result; // Create the reader. if (IndexedInstrProfReader::hasFormat(*Buffer)) Result.reset(new IndexedInstrProfReader(std::move(Buffer))); else if (RawInstrProfReader64::hasFormat(*Buffer)) Result.reset(new RawInstrProfReader64(std::move(Buffer))); else if (RawInstrProfReader32::hasFormat(*Buffer)) Result.reset(new RawInstrProfReader32(std::move(Buffer))); else Result.reset(new TextInstrProfReader(std::move(Buffer))); // Initialize the reader and return the result. if (std::error_code EC = initializeReader(*Result)) return EC; return std::move(Result); } ErrorOr<std::unique_ptr<IndexedInstrProfReader>> IndexedInstrProfReader::create(std::string Path) { // Set up the buffer to read. auto BufferOrError = setupMemoryBuffer(Path); if (std::error_code EC = BufferOrError.getError()) return EC; return IndexedInstrProfReader::create(std::move(BufferOrError.get())); } ErrorOr<std::unique_ptr<IndexedInstrProfReader>> IndexedInstrProfReader::create(std::unique_ptr<MemoryBuffer> Buffer) { // Sanity check the buffer. if (Buffer->getBufferSize() > std::numeric_limits<unsigned>::max()) return instrprof_error::too_large; // Create the reader. if (!IndexedInstrProfReader::hasFormat(*Buffer)) return instrprof_error::bad_magic; auto Result = llvm::make_unique<IndexedInstrProfReader>(std::move(Buffer)); // Initialize the reader and return the result. if (std::error_code EC = initializeReader(*Result)) return EC; return std::move(Result); } void InstrProfIterator::Increment() { if (Reader->readNextRecord(Record)) *this = InstrProfIterator(); } std::error_code TextInstrProfReader::readNextRecord(InstrProfRecord &Record) { // Skip empty lines and comments. while (!Line.is_at_end() && (Line->empty() || Line->startswith("#"))) ++Line; // If we hit EOF while looking for a name, we're done. if (Line.is_at_end()) return error(instrprof_error::eof); // Read the function name. Record.Name = *Line++; // Read the function hash. 
if (Line.is_at_end()) return error(instrprof_error::truncated); if ((Line++)->getAsInteger(0, Record.Hash)) return error(instrprof_error::malformed); // Read the number of counters. uint64_t NumCounters; if (Line.is_at_end()) return error(instrprof_error::truncated); if ((Line++)->getAsInteger(10, NumCounters)) return error(instrprof_error::malformed); if (NumCounters == 0) return error(instrprof_error::malformed); // Read each counter and fill our internal storage with the values. Record.Counts.clear(); Record.Counts.reserve(NumCounters); for (uint64_t I = 0; I < NumCounters; ++I) { if (Line.is_at_end()) return error(instrprof_error::truncated); uint64_t Count; if ((Line++)->getAsInteger(10, Count)) return error(instrprof_error::malformed); Record.Counts.push_back(Count); } return success(); } template <class IntPtrT> static uint64_t getRawMagic(); template <> uint64_t getRawMagic<uint64_t>() { return uint64_t(255) << 56 | uint64_t('l') << 48 | uint64_t('p') << 40 | uint64_t('r') << 32 | uint64_t('o') << 24 | uint64_t('f') << 16 | uint64_t('r') << 8 | uint64_t(129); } template <> uint64_t getRawMagic<uint32_t>() { return uint64_t(255) << 56 | uint64_t('l') << 48 | uint64_t('p') << 40 | uint64_t('r') << 32 | uint64_t('o') << 24 | uint64_t('f') << 16 | uint64_t('R') << 8 | uint64_t(129); } template <class IntPtrT> bool RawInstrProfReader<IntPtrT>::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < sizeof(uint64_t)) return false; uint64_t Magic = *reinterpret_cast<const uint64_t *>(DataBuffer.getBufferStart()); return getRawMagic<IntPtrT>() == Magic || sys::getSwappedBytes(getRawMagic<IntPtrT>()) == Magic; } template <class IntPtrT> std::error_code RawInstrProfReader<IntPtrT>::readHeader() { if (!hasFormat(*DataBuffer)) return error(instrprof_error::bad_magic); if (DataBuffer->getBufferSize() < sizeof(RawHeader)) return error(instrprof_error::bad_header); auto *Header = reinterpret_cast<const RawHeader *>(DataBuffer->getBufferStart()); ShouldSwapBytes = Header->Magic != getRawMagic<IntPtrT>(); return readHeader(*Header); } template <class IntPtrT> std::error_code RawInstrProfReader<IntPtrT>::readNextHeader(const char *CurrentPos) { const char *End = DataBuffer->getBufferEnd(); // Skip zero padding between profiles. while (CurrentPos != End && *CurrentPos == 0) ++CurrentPos; // If there's nothing left, we're done. if (CurrentPos == End) return instrprof_error::eof; // If there isn't enough space for another header, this is probably just // garbage at the end of the file. if (CurrentPos + sizeof(RawHeader) > End) return instrprof_error::malformed; // The writer ensures each profile is padded to start at an aligned address. if (reinterpret_cast<size_t>(CurrentPos) % alignOf<uint64_t>()) return instrprof_error::malformed; // The magic should have the same byte order as in the previous header. uint64_t Magic = *reinterpret_cast<const uint64_t *>(CurrentPos); if (Magic != swap(getRawMagic<IntPtrT>())) return instrprof_error::bad_magic; // There's another profile to read, so we need to process the header. 
auto *Header = reinterpret_cast<const RawHeader *>(CurrentPos); return readHeader(*Header); } static uint64_t getRawVersion() { return 1; } template <class IntPtrT> std::error_code RawInstrProfReader<IntPtrT>::readHeader(const RawHeader &Header) { if (swap(Header.Version) != getRawVersion()) return error(instrprof_error::unsupported_version); CountersDelta = swap(Header.CountersDelta); NamesDelta = swap(Header.NamesDelta); auto DataSize = swap(Header.DataSize); auto CountersSize = swap(Header.CountersSize); auto NamesSize = swap(Header.NamesSize); ptrdiff_t DataOffset = sizeof(RawHeader); ptrdiff_t CountersOffset = DataOffset + sizeof(ProfileData) * DataSize; ptrdiff_t NamesOffset = CountersOffset + sizeof(uint64_t) * CountersSize; size_t ProfileSize = NamesOffset + sizeof(char) * NamesSize; auto *Start = reinterpret_cast<const char *>(&Header); if (Start + ProfileSize > DataBuffer->getBufferEnd()) return error(instrprof_error::bad_header); Data = reinterpret_cast<const ProfileData *>(Start + DataOffset); DataEnd = Data + DataSize; CountersStart = reinterpret_cast<const uint64_t *>(Start + CountersOffset); NamesStart = Start + NamesOffset; ProfileEnd = Start + ProfileSize; return success(); } template <class IntPtrT> std::error_code RawInstrProfReader<IntPtrT>::readNextRecord(InstrProfRecord &Record) { if (Data == DataEnd) if (std::error_code EC = readNextHeader(ProfileEnd)) return EC; // Get the raw data. StringRef RawName(getName(Data->NamePtr), swap(Data->NameSize)); uint32_t NumCounters = swap(Data->NumCounters); if (NumCounters == 0) return error(instrprof_error::malformed); auto RawCounts = makeArrayRef(getCounter(Data->CounterPtr), NumCounters); // Check bounds. auto *NamesStartAsCounter = reinterpret_cast<const uint64_t *>(NamesStart); if (RawName.data() < NamesStart || RawName.data() + RawName.size() > DataBuffer->getBufferEnd() || RawCounts.data() < CountersStart || RawCounts.data() + RawCounts.size() > NamesStartAsCounter) return error(instrprof_error::malformed); // Store the data in Record, byte-swapping as necessary. Record.Hash = swap(Data->FuncHash); Record.Name = RawName; if (ShouldSwapBytes) { Record.Counts.clear(); Record.Counts.reserve(RawCounts.size()); for (uint64_t Count : RawCounts) Record.Counts.push_back(swap(Count)); } else Record.Counts = RawCounts; // Iterate. ++Data; return success(); } namespace llvm { template class RawInstrProfReader<uint32_t>; template class RawInstrProfReader<uint64_t>; } InstrProfLookupTrait::hash_value_type InstrProfLookupTrait::ComputeHash(StringRef K) { return IndexedInstrProf::ComputeHash(HashType, K); } typedef InstrProfLookupTrait::data_type data_type; typedef InstrProfLookupTrait::offset_type offset_type; data_type InstrProfLookupTrait::ReadData(StringRef K, const unsigned char *D, offset_type N) { // Check if the data is corrupt. If so, don't try to read it. if (N % sizeof(uint64_t)) return data_type(); DataBuffer.clear(); uint64_t NumCounts; uint64_t NumEntries = N / sizeof(uint64_t); std::vector<uint64_t> CounterBuffer; for (uint64_t I = 0; I < NumEntries; I += NumCounts) { using namespace support; // The function hash comes first. uint64_t Hash = endian::readNext<uint64_t, little, unaligned>(D); if (++I >= NumEntries) return data_type(); // In v1, we have at least one count. // Later, we have the number of counts. NumCounts = (1 == FormatVersion) ? NumEntries - I : endian::readNext<uint64_t, little, unaligned>(D); if (1 != FormatVersion) ++I; // If we have more counts than data, this is bogus. 
if (I + NumCounts > NumEntries) return data_type(); CounterBuffer.clear(); for (unsigned J = 0; J < NumCounts; ++J) CounterBuffer.push_back(endian::readNext<uint64_t, little, unaligned>(D)); DataBuffer.push_back(InstrProfRecord(K, Hash, std::move(CounterBuffer))); } return DataBuffer; } bool IndexedInstrProfReader::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < 8) return false; using namespace support; uint64_t Magic = endian::read<uint64_t, little, aligned>(DataBuffer.getBufferStart()); return Magic == IndexedInstrProf::Magic; } std::error_code IndexedInstrProfReader::readHeader() { const unsigned char *Start = (const unsigned char *)DataBuffer->getBufferStart(); const unsigned char *Cur = Start; if ((const unsigned char *)DataBuffer->getBufferEnd() - Cur < 24) return error(instrprof_error::truncated); using namespace support; // Check the magic number. uint64_t Magic = endian::readNext<uint64_t, little, unaligned>(Cur); if (Magic != IndexedInstrProf::Magic) return error(instrprof_error::bad_magic); // Read the version. FormatVersion = endian::readNext<uint64_t, little, unaligned>(Cur); if (FormatVersion > IndexedInstrProf::Version) return error(instrprof_error::unsupported_version); // Read the maximal function count. MaxFunctionCount = endian::readNext<uint64_t, little, unaligned>(Cur); // Read the hash type and start offset. IndexedInstrProf::HashT HashType = static_cast<IndexedInstrProf::HashT>( endian::readNext<uint64_t, little, unaligned>(Cur)); if (HashType > IndexedInstrProf::HashT::Last) return error(instrprof_error::unsupported_hash_type); uint64_t HashOffset = endian::readNext<uint64_t, little, unaligned>(Cur); // The rest of the file is an on disk hash table. Index.reset(InstrProfReaderIndex::Create( Start + HashOffset, Cur, Start, InstrProfLookupTrait(HashType, FormatVersion))); // Set up our iterator for readNextRecord. RecordIterator = Index->data_begin(); return success(); } std::error_code IndexedInstrProfReader::getFunctionCounts( StringRef FuncName, uint64_t FuncHash, std::vector<uint64_t> &Counts) { auto Iter = Index->find(FuncName); if (Iter == Index->end()) return error(instrprof_error::unknown_function); // Found it. Look for counters with the right hash. ArrayRef<InstrProfRecord> Data = (*Iter); if (Data.empty()) return error(instrprof_error::malformed); for (unsigned I = 0, E = Data.size(); I < E; ++I) { // Check for a match and fill the vector if there is one. if (Data[I].Hash == FuncHash) { Counts = Data[I].Counts; return success(); } } return error(instrprof_error::hash_mismatch); } std::error_code IndexedInstrProfReader::readNextRecord(InstrProfRecord &Record) { // Are we out of records? if (RecordIterator == Index->data_end()) return error(instrprof_error::eof); if ((*RecordIterator).empty()) return error(instrprof_error::malformed); static unsigned RecordIndex = 0; ArrayRef<InstrProfRecord> Data = (*RecordIterator); Record = Data[RecordIndex++]; if (RecordIndex >= Data.size()) { ++RecordIterator; RecordIndex = 0; } return success(); }
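A hypothetical input for TextInstrProfReader::readNextRecord above, consistent with its parsing loop (function name, then the hash parsed with radix 0 so hex is accepted, then the counter count and one decimal count per line; '#' lines and blank lines are skipped; names and values are illustrative only):

# two functions
main
0x1234
2
100
18

_Z3foov
5678
1
42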
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/CoverageMapping.cpp
//=-- CoverageMapping.cpp - Code coverage mapping support ---------*- C++ -*-=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for clang's and llvm's instrumentation based // code coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/CoverageMapping.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ProfileData/CoverageMappingReader.h" #include "llvm/ProfileData/InstrProfReader.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Errc.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace coverage; #define DEBUG_TYPE "coverage-mapping" Counter CounterExpressionBuilder::get(const CounterExpression &E) { auto It = ExpressionIndices.find(E); if (It != ExpressionIndices.end()) return Counter::getExpression(It->second); unsigned I = Expressions.size(); Expressions.push_back(E); ExpressionIndices[E] = I; return Counter::getExpression(I); } void CounterExpressionBuilder::extractTerms( Counter C, int Sign, SmallVectorImpl<std::pair<unsigned, int>> &Terms) { switch (C.getKind()) { case Counter::Zero: break; case Counter::CounterValueReference: Terms.push_back(std::make_pair(C.getCounterID(), Sign)); break; case Counter::Expression: const auto &E = Expressions[C.getExpressionID()]; extractTerms(E.LHS, Sign, Terms); extractTerms(E.RHS, E.Kind == CounterExpression::Subtract ? -Sign : Sign, Terms); break; } } Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) { // Gather constant terms. llvm::SmallVector<std::pair<unsigned, int>, 32> Terms; extractTerms(ExpressionTree, +1, Terms); // If there are no terms, this is just a zero. The algorithm below assumes at // least one term. if (Terms.size() == 0) return Counter::getZero(); // Group the terms by counter ID. std::sort(Terms.begin(), Terms.end(), [](const std::pair<unsigned, int> &LHS, const std::pair<unsigned, int> &RHS) { return LHS.first < RHS.first; }); // Combine terms by counter ID to eliminate counters that sum to zero. auto Prev = Terms.begin(); for (auto I = Prev + 1, E = Terms.end(); I != E; ++I) { if (I->first == Prev->first) { Prev->second += I->second; continue; } ++Prev; *Prev = *I; } Terms.erase(++Prev, Terms.end()); Counter C; // Create additions. We do this before subtractions to avoid constructs like // ((0 - X) + Y), as opposed to (Y - X). for (auto Term : Terms) { if (Term.second <= 0) continue; for (int I = 0; I < Term.second; ++I) if (C.isZero()) C = Counter::getCounter(Term.first); else C = get(CounterExpression(CounterExpression::Add, C, Counter::getCounter(Term.first))); } // Create subtractions. 
for (auto Term : Terms) { if (Term.second >= 0) continue; for (int I = 0; I < -Term.second; ++I) C = get(CounterExpression(CounterExpression::Subtract, C, Counter::getCounter(Term.first))); } return C; } Counter CounterExpressionBuilder::add(Counter LHS, Counter RHS) { return simplify(get(CounterExpression(CounterExpression::Add, LHS, RHS))); } Counter CounterExpressionBuilder::subtract(Counter LHS, Counter RHS) { return simplify( get(CounterExpression(CounterExpression::Subtract, LHS, RHS))); } void CounterMappingContext::dump(const Counter &C, llvm::raw_ostream &OS) const { switch (C.getKind()) { case Counter::Zero: OS << '0'; return; case Counter::CounterValueReference: OS << '#' << C.getCounterID(); break; case Counter::Expression: { if (C.getExpressionID() >= Expressions.size()) return; const auto &E = Expressions[C.getExpressionID()]; OS << '('; dump(E.LHS, OS); OS << (E.Kind == CounterExpression::Subtract ? " - " : " + "); dump(E.RHS, OS); OS << ')'; break; } } if (CounterValues.empty()) return; ErrorOr<int64_t> Value = evaluate(C); if (!Value) return; OS << '[' << *Value << ']'; } ErrorOr<int64_t> CounterMappingContext::evaluate(const Counter &C) const { switch (C.getKind()) { case Counter::Zero: return 0; case Counter::CounterValueReference: if (C.getCounterID() >= CounterValues.size()) return make_error_code(errc::argument_out_of_domain); return CounterValues[C.getCounterID()]; case Counter::Expression: { if (C.getExpressionID() >= Expressions.size()) return make_error_code(errc::argument_out_of_domain); const auto &E = Expressions[C.getExpressionID()]; ErrorOr<int64_t> LHS = evaluate(E.LHS); if (!LHS) return LHS; ErrorOr<int64_t> RHS = evaluate(E.RHS); if (!RHS) return RHS; return E.Kind == CounterExpression::Subtract ? *LHS - *RHS : *LHS + *RHS; } } llvm_unreachable("Unhandled CounterKind"); } void FunctionRecordIterator::skipOtherFiles() { while (Current != Records.end() && !Filename.empty() && Filename != Current->Filenames[0]) ++Current; if (Current == Records.end()) *this = FunctionRecordIterator(); } /// Get the function name from the record, removing the filename prefix if /// necessary. 
static StringRef getFuncNameWithoutPrefix(const CoverageMappingRecord &Record) { StringRef FunctionName = Record.FunctionName; if (Record.Filenames.empty()) return FunctionName; StringRef Filename = sys::path::filename(Record.Filenames[0]); if (FunctionName.startswith(Filename)) FunctionName = FunctionName.drop_front(Filename.size() + 1); return FunctionName; } ErrorOr<std::unique_ptr<CoverageMapping>> CoverageMapping::load(CoverageMappingReader &CoverageReader, IndexedInstrProfReader &ProfileReader) { auto Coverage = std::unique_ptr<CoverageMapping>(new CoverageMapping()); std::vector<uint64_t> Counts; for (const auto &Record : CoverageReader) { CounterMappingContext Ctx(Record.Expressions); Counts.clear(); if (std::error_code EC = ProfileReader.getFunctionCounts( Record.FunctionName, Record.FunctionHash, Counts)) { if (EC == instrprof_error::hash_mismatch) { Coverage->MismatchedFunctionCount++; continue; } else if (EC != instrprof_error::unknown_function) return EC; Counts.assign(Record.MappingRegions.size(), 0); } Ctx.setCounts(Counts); assert(!Record.MappingRegions.empty() && "Function has no regions"); FunctionRecord Function(getFuncNameWithoutPrefix(Record), Record.Filenames); for (const auto &Region : Record.MappingRegions) { ErrorOr<int64_t> ExecutionCount = Ctx.evaluate(Region.Count); if (!ExecutionCount) break; Function.pushRegion(Region, *ExecutionCount); } if (Function.CountedRegions.size() != Record.MappingRegions.size()) { Coverage->MismatchedFunctionCount++; continue; } Coverage->Functions.push_back(std::move(Function)); } return std::move(Coverage); } ErrorOr<std::unique_ptr<CoverageMapping>> CoverageMapping::load(StringRef ObjectFilename, StringRef ProfileFilename, StringRef Arch) { auto CounterMappingBuff = MemoryBuffer::getFileOrSTDIN(ObjectFilename); if (std::error_code EC = CounterMappingBuff.getError()) return EC; auto CoverageReaderOrErr = BinaryCoverageReader::create(CounterMappingBuff.get(), Arch); if (std::error_code EC = CoverageReaderOrErr.getError()) return EC; auto CoverageReader = std::move(CoverageReaderOrErr.get()); auto ProfileReaderOrErr = IndexedInstrProfReader::create(ProfileFilename); if (auto EC = ProfileReaderOrErr.getError()) return EC; auto ProfileReader = std::move(ProfileReaderOrErr.get()); return load(*CoverageReader, *ProfileReader); } namespace { /// \brief Distributes functions into instantiation sets. /// /// An instantiation set is a collection of functions that have the same source /// code, ie, template functions specializations. class FunctionInstantiationSetCollector { typedef DenseMap<std::pair<unsigned, unsigned>, std::vector<const FunctionRecord *>> MapT; MapT InstantiatedFunctions; public: void insert(const FunctionRecord &Function, unsigned FileID) { auto I = Function.CountedRegions.begin(), E = Function.CountedRegions.end(); while (I != E && I->FileID != FileID) ++I; assert(I != E && "function does not cover the given file"); auto &Functions = InstantiatedFunctions[I->startLoc()]; Functions.push_back(&Function); } MapT::iterator begin() { return InstantiatedFunctions.begin(); } MapT::iterator end() { return InstantiatedFunctions.end(); } }; class SegmentBuilder { std::vector<CoverageSegment> Segments; SmallVector<const CountedRegion *, 8> ActiveRegions; /// Start a segment with no count specified. void startSegment(unsigned Line, unsigned Col) { DEBUG(dbgs() << "Top level segment at " << Line << ":" << Col << "\n"); Segments.emplace_back(Line, Col, /*IsRegionEntry=*/false); } /// Start a segment with the given Region's count. 
void startSegment(unsigned Line, unsigned Col, bool IsRegionEntry, const CountedRegion &Region) { if (Segments.empty()) Segments.emplace_back(Line, Col, IsRegionEntry); CoverageSegment S = Segments.back(); // Avoid creating empty regions. if (S.Line != Line || S.Col != Col) { Segments.emplace_back(Line, Col, IsRegionEntry); S = Segments.back(); } DEBUG(dbgs() << "Segment at " << Line << ":" << Col); // Set this region's count. if (Region.Kind != coverage::CounterMappingRegion::SkippedRegion) { DEBUG(dbgs() << " with count " << Region.ExecutionCount); Segments.back().setCount(Region.ExecutionCount); } DEBUG(dbgs() << "\n"); } /// Start a segment for the given region. void startSegment(const CountedRegion &Region) { startSegment(Region.LineStart, Region.ColumnStart, true, Region); } /// Pop the top region off of the active stack, starting a new segment with /// the containing Region's count. void popRegion() { const CountedRegion *Active = ActiveRegions.back(); unsigned Line = Active->LineEnd, Col = Active->ColumnEnd; ActiveRegions.pop_back(); if (ActiveRegions.empty()) startSegment(Line, Col); else startSegment(Line, Col, false, *ActiveRegions.back()); } public: /// Build a list of CoverageSegments from a sorted list of Regions. std::vector<CoverageSegment> buildSegments(ArrayRef<CountedRegion> Regions) { const CountedRegion *PrevRegion = nullptr; for (const auto &Region : Regions) { // Pop any regions that end before this one starts. while (!ActiveRegions.empty() && ActiveRegions.back()->endLoc() <= Region.startLoc()) popRegion(); if (PrevRegion && PrevRegion->startLoc() == Region.startLoc() && PrevRegion->endLoc() == Region.endLoc()) { if (Region.Kind == coverage::CounterMappingRegion::CodeRegion) Segments.back().addCount(Region.ExecutionCount); } else { // Add this region to the stack. ActiveRegions.push_back(&Region); startSegment(Region); } PrevRegion = &Region; } // Pop any regions that are left in the stack. 
while (!ActiveRegions.empty()) popRegion(); return Segments; } }; } std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const { std::vector<StringRef> Filenames; for (const auto &Function : getCoveredFunctions()) Filenames.insert(Filenames.end(), Function.Filenames.begin(), Function.Filenames.end()); std::sort(Filenames.begin(), Filenames.end()); auto Last = std::unique(Filenames.begin(), Filenames.end()); Filenames.erase(Last, Filenames.end()); return Filenames; } static SmallBitVector gatherFileIDs(StringRef SourceFile, const FunctionRecord &Function) { SmallBitVector FilenameEquivalence(Function.Filenames.size(), false); for (unsigned I = 0, E = Function.Filenames.size(); I < E; ++I) if (SourceFile == Function.Filenames[I]) FilenameEquivalence[I] = true; return FilenameEquivalence; } static Optional<unsigned> findMainViewFileID(StringRef SourceFile, const FunctionRecord &Function) { SmallBitVector IsNotExpandedFile(Function.Filenames.size(), true); SmallBitVector FilenameEquivalence = gatherFileIDs(SourceFile, Function); for (const auto &CR : Function.CountedRegions) if (CR.Kind == CounterMappingRegion::ExpansionRegion && FilenameEquivalence[CR.FileID]) IsNotExpandedFile[CR.ExpandedFileID] = false; IsNotExpandedFile &= FilenameEquivalence; int I = IsNotExpandedFile.find_first(); if (I == -1) return None; return I; } static Optional<unsigned> findMainViewFileID(const FunctionRecord &Function) { SmallBitVector IsNotExpandedFile(Function.Filenames.size(), true); for (const auto &CR : Function.CountedRegions) if (CR.Kind == CounterMappingRegion::ExpansionRegion) IsNotExpandedFile[CR.ExpandedFileID] = false; int I = IsNotExpandedFile.find_first(); if (I == -1) return None; return I; } /// Sort a nested sequence of regions from a single file. template <class It> static void sortNestedRegions(It First, It Last) { std::sort(First, Last, [](const CountedRegion &LHS, const CountedRegion &RHS) { if (LHS.startLoc() == RHS.startLoc()) // When LHS completely contains RHS, we sort LHS first. 
return RHS.endLoc() < LHS.endLoc(); return LHS.startLoc() < RHS.startLoc(); }); } static bool isExpansion(const CountedRegion &R, unsigned FileID) { return R.Kind == CounterMappingRegion::ExpansionRegion && R.FileID == FileID; } CoverageData CoverageMapping::getCoverageForFile(StringRef Filename) { CoverageData FileCoverage(Filename); std::vector<coverage::CountedRegion> Regions; for (const auto &Function : Functions) { auto MainFileID = findMainViewFileID(Filename, Function); if (!MainFileID) continue; auto FileIDs = gatherFileIDs(Filename, Function); for (const auto &CR : Function.CountedRegions) if (FileIDs.test(CR.FileID)) { Regions.push_back(CR); if (isExpansion(CR, *MainFileID)) FileCoverage.Expansions.emplace_back(CR, Function); } } sortNestedRegions(Regions.begin(), Regions.end()); DEBUG(dbgs() << "Emitting segments for file: " << Filename << "\n"); FileCoverage.Segments = SegmentBuilder().buildSegments(Regions); return FileCoverage; } std::vector<const FunctionRecord *> CoverageMapping::getInstantiations(StringRef Filename) { FunctionInstantiationSetCollector InstantiationSetCollector; for (const auto &Function : Functions) { auto MainFileID = findMainViewFileID(Filename, Function); if (!MainFileID) continue; InstantiationSetCollector.insert(Function, *MainFileID); } std::vector<const FunctionRecord *> Result; for (const auto &InstantiationSet : InstantiationSetCollector) { if (InstantiationSet.second.size() < 2) continue; Result.insert(Result.end(), InstantiationSet.second.begin(), InstantiationSet.second.end()); } return Result; } CoverageData CoverageMapping::getCoverageForFunction(const FunctionRecord &Function) { auto MainFileID = findMainViewFileID(Function); if (!MainFileID) return CoverageData(); CoverageData FunctionCoverage(Function.Filenames[*MainFileID]); std::vector<coverage::CountedRegion> Regions; for (const auto &CR : Function.CountedRegions) if (CR.FileID == *MainFileID) { Regions.push_back(CR); if (isExpansion(CR, *MainFileID)) FunctionCoverage.Expansions.emplace_back(CR, Function); } sortNestedRegions(Regions.begin(), Regions.end()); DEBUG(dbgs() << "Emitting segments for function: " << Function.Name << "\n"); FunctionCoverage.Segments = SegmentBuilder().buildSegments(Regions); return FunctionCoverage; } CoverageData CoverageMapping::getCoverageForExpansion(const ExpansionRecord &Expansion) { CoverageData ExpansionCoverage( Expansion.Function.Filenames[Expansion.FileID]); std::vector<coverage::CountedRegion> Regions; for (const auto &CR : Expansion.Function.CountedRegions) if (CR.FileID == Expansion.FileID) { Regions.push_back(CR); if (isExpansion(CR, Expansion.FileID)) ExpansionCoverage.Expansions.emplace_back(CR, Expansion.Function); } sortNestedRegions(Regions.begin(), Regions.end()); DEBUG(dbgs() << "Emitting segments for expansion of file " << Expansion.FileID << "\n"); ExpansionCoverage.Segments = SegmentBuilder().buildSegments(Regions); return ExpansionCoverage; } namespace { class CoverageMappingErrorCategoryType : public std::error_category { const char *name() const LLVM_NOEXCEPT override { return "llvm.coveragemap"; } std::string message(int IE) const override { auto E = static_cast<coveragemap_error>(IE); switch (E) { case coveragemap_error::success: return "Success"; case coveragemap_error::eof: return "End of File"; case coveragemap_error::no_data_found: return "No coverage data found"; case coveragemap_error::unsupported_version: return "Unsupported coverage format version"; case coveragemap_error::truncated: return "Truncated coverage data"; case 
coveragemap_error::malformed: return "Malformed coverage data"; } llvm_unreachable("A value of coveragemap_error has no message."); } }; } static ManagedStatic<CoverageMappingErrorCategoryType> ErrorCategory; const std::error_category &llvm::coveragemap_category() { return *ErrorCategory; }
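A minimal sketch of how CounterMappingContext::evaluate above folds an expression tree (illustrative only, following the construction pattern CoverageMapping::load uses; with counter 0 at 100 and counter 1 at 60, the subtraction evaluates to 40):

// Sketch: evaluate "(#0 - #1)" against counts {100, 60}.
CounterExpression Sub(CounterExpression::Subtract,
                      Counter::getCounter(0), Counter::getCounter(1));
CounterMappingContext Ctx(makeArrayRef(&Sub, 1));
uint64_t Counts[] = {100, 60};
Ctx.setCounts(Counts);
ErrorOr<int64_t> V = Ctx.evaluate(Counter::getExpression(0)); // *V == 40
// An out-of-range counter or expression ID yields
// errc::argument_out_of_domain instead of a value.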
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/InstrProf.cpp
//=-- InstrProf.cpp - Instrumented profiling format support -----------------=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for clang's instrumentation based PGO and // coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/InstrProf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" using namespace llvm; namespace { class InstrProfErrorCategoryType : public std::error_category { const char *name() const LLVM_NOEXCEPT override { return "llvm.instrprof"; } std::string message(int IE) const override { instrprof_error E = static_cast<instrprof_error>(IE); switch (E) { case instrprof_error::success: return "Success"; case instrprof_error::eof: return "End of File"; case instrprof_error::bad_magic: return "Invalid profile data (bad magic)"; case instrprof_error::bad_header: return "Invalid profile data (file header is corrupt)"; case instrprof_error::unsupported_version: return "Unsupported profiling format version"; case instrprof_error::unsupported_hash_type: return "Unsupported profiling hash"; case instrprof_error::too_large: return "Too much profile data"; case instrprof_error::truncated: return "Truncated profile data"; case instrprof_error::malformed: return "Malformed profile data"; case instrprof_error::unknown_function: return "No profile data available for function"; case instrprof_error::hash_mismatch: return "Function hash mismatch"; case instrprof_error::count_mismatch: return "Function count mismatch"; case instrprof_error::counter_overflow: return "Counter overflow"; } llvm_unreachable("A value of instrprof_error has no message."); } }; } static ManagedStatic<InstrProfErrorCategoryType> ErrorCategory; const std::error_category &llvm::instrprof_category() { return *ErrorCategory; }
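A brief usage sketch for the category above (this assumes the make_error_code(instrprof_error) overload that the matching InstrProf.h header declares; illustrative only):

std::error_code EC = make_error_code(instrprof_error::bad_magic);
// EC.category().name()  -> "llvm.instrprof"
// EC.message()          -> "Invalid profile data (bad magic)"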
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/SampleProf.cpp
//=-- SampleProf.cpp - Sample profiling format support --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains common definitions used in the reading and writing of // sample profile data. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/SampleProf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" using namespace llvm; namespace { class SampleProfErrorCategoryType : public std::error_category { const char *name() const LLVM_NOEXCEPT override { return "llvm.sampleprof"; } std::string message(int IE) const override { sampleprof_error E = static_cast<sampleprof_error>(IE); switch (E) { case sampleprof_error::success: return "Success"; case sampleprof_error::bad_magic: return "Invalid file format (bad magic)"; case sampleprof_error::unsupported_version: return "Unsupported format version"; case sampleprof_error::too_large: return "Too much profile data"; case sampleprof_error::truncated: return "Truncated profile data"; case sampleprof_error::malformed: return "Malformed profile data"; case sampleprof_error::unrecognized_format: return "Unrecognized profile encoding format"; } llvm_unreachable("A value of sampleprof_error has no message."); } }; } static ManagedStatic<SampleProfErrorCategoryType> ErrorCategory; const std::error_category &llvm::sampleprof_category() { return *ErrorCategory; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/CoverageMappingReader.cpp
//=-- CoverageMappingReader.cpp - Code coverage mapping reader ----*- C++ -*-=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains support for reading coverage mapping data for // instrumentation based coverage. // //===----------------------------------------------------------------------===// #include "llvm/ProfileData/CoverageMappingReader.h" #include "llvm/ADT/DenseSet.h" #include "llvm/Object/MachOUniversal.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Endian.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace coverage; using namespace object; #define DEBUG_TYPE "coverage-mapping" void CoverageMappingIterator::increment() { // Check if all the records were read or if an error occurred while reading // the next record. if (Reader->readNextRecord(Record)) *this = CoverageMappingIterator(); } std::error_code RawCoverageReader::readULEB128(uint64_t &Result) { if (Data.size() < 1) return coveragemap_error::truncated; unsigned N = 0; Result = decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); if (N > Data.size()) return coveragemap_error::malformed; Data = Data.substr(N); return std::error_code(); } std::error_code RawCoverageReader::readIntMax(uint64_t &Result, uint64_t MaxPlus1) { if (auto Err = readULEB128(Result)) return Err; if (Result >= MaxPlus1) return coveragemap_error::malformed; return std::error_code(); } std::error_code RawCoverageReader::readSize(uint64_t &Result) { if (auto Err = readULEB128(Result)) return Err; // Sanity check the number. 
if (Result > Data.size()) return coveragemap_error::malformed; return std::error_code(); } std::error_code RawCoverageReader::readString(StringRef &Result) { uint64_t Length; if (auto Err = readSize(Length)) return Err; Result = Data.substr(0, Length); Data = Data.substr(Length); return std::error_code(); } std::error_code RawCoverageFilenamesReader::read() { uint64_t NumFilenames; if (auto Err = readSize(NumFilenames)) return Err; for (size_t I = 0; I < NumFilenames; ++I) { StringRef Filename; if (auto Err = readString(Filename)) return Err; Filenames.push_back(Filename); } return std::error_code(); } std::error_code RawCoverageMappingReader::decodeCounter(unsigned Value, Counter &C) { auto Tag = Value & Counter::EncodingTagMask; switch (Tag) { case Counter::Zero: C = Counter::getZero(); return std::error_code(); case Counter::CounterValueReference: C = Counter::getCounter(Value >> Counter::EncodingTagBits); return std::error_code(); default: break; } Tag -= Counter::Expression; switch (Tag) { case CounterExpression::Subtract: case CounterExpression::Add: { auto ID = Value >> Counter::EncodingTagBits; if (ID >= Expressions.size()) return coveragemap_error::malformed; Expressions[ID].Kind = CounterExpression::ExprKind(Tag); C = Counter::getExpression(ID); break; } default: return coveragemap_error::malformed; } return std::error_code(); } std::error_code RawCoverageMappingReader::readCounter(Counter &C) { uint64_t EncodedCounter; if (auto Err = readIntMax(EncodedCounter, std::numeric_limits<unsigned>::max())) return Err; if (auto Err = decodeCounter(EncodedCounter, C)) return Err; return std::error_code(); } static const unsigned EncodingExpansionRegionBit = 1 << Counter::EncodingTagBits; /// \brief Read the sub-array of regions for the given inferred file id. /// \param NumFileIDs the number of file ids that are defined for this /// function. std::error_code RawCoverageMappingReader::readMappingRegionsSubArray( std::vector<CounterMappingRegion> &MappingRegions, unsigned InferredFileID, size_t NumFileIDs) { uint64_t NumRegions; if (auto Err = readSize(NumRegions)) return Err; unsigned LineStart = 0; for (size_t I = 0; I < NumRegions; ++I) { Counter C; CounterMappingRegion::RegionKind Kind = CounterMappingRegion::CodeRegion; // Read the combined counter + region kind. uint64_t EncodedCounterAndRegion; if (auto Err = readIntMax(EncodedCounterAndRegion, std::numeric_limits<unsigned>::max())) return Err; unsigned Tag = EncodedCounterAndRegion & Counter::EncodingTagMask; uint64_t ExpandedFileID = 0; if (Tag != Counter::Zero) { if (auto Err = decodeCounter(EncodedCounterAndRegion, C)) return Err; } else { // Is it an expansion region? if (EncodedCounterAndRegion & EncodingExpansionRegionBit) { Kind = CounterMappingRegion::ExpansionRegion; ExpandedFileID = EncodedCounterAndRegion >> Counter::EncodingCounterTagAndExpansionRegionTagBits; if (ExpandedFileID >= NumFileIDs) return coveragemap_error::malformed; } else { switch (EncodedCounterAndRegion >> Counter::EncodingCounterTagAndExpansionRegionTagBits) { case CounterMappingRegion::CodeRegion: // Don't do anything when we have a code region with a zero counter. break; case CounterMappingRegion::SkippedRegion: Kind = CounterMappingRegion::SkippedRegion; break; default: return coveragemap_error::malformed; } } } // Read the source range. 
uint64_t LineStartDelta, ColumnStart, NumLines, ColumnEnd; if (auto Err = readIntMax(LineStartDelta, std::numeric_limits<unsigned>::max())) return Err; if (auto Err = readULEB128(ColumnStart)) return Err; if (ColumnStart > std::numeric_limits<unsigned>::max()) return coveragemap_error::malformed; if (auto Err = readIntMax(NumLines, std::numeric_limits<unsigned>::max())) return Err; if (auto Err = readIntMax(ColumnEnd, std::numeric_limits<unsigned>::max())) return Err; LineStart += LineStartDelta; // Adjust the column locations for the empty regions that are supposed to // cover whole lines. Those regions should be encoded with the // column range (1 -> std::numeric_limits<unsigned>::max()), but because // the encoded std::numeric_limits<unsigned>::max() is several bytes long, // we set the column range to (0 -> 0) to ensure that the column start and // column end take up one byte each. // The std::numeric_limits<unsigned>::max() is used to represent a column // position at the end of the line without knowing the length of that line. if (ColumnStart == 0 && ColumnEnd == 0) { ColumnStart = 1; ColumnEnd = std::numeric_limits<unsigned>::max(); } DEBUG({ dbgs() << "Counter in file " << InferredFileID << " " << LineStart << ":" << ColumnStart << " -> " << (LineStart + NumLines) << ":" << ColumnEnd << ", "; if (Kind == CounterMappingRegion::ExpansionRegion) dbgs() << "Expands to file " << ExpandedFileID; else CounterMappingContext(Expressions).dump(C, dbgs()); dbgs() << "\n"; }); MappingRegions.push_back(CounterMappingRegion( C, InferredFileID, ExpandedFileID, LineStart, ColumnStart, LineStart + NumLines, ColumnEnd, Kind)); } return std::error_code(); } std::error_code RawCoverageMappingReader::read() { // Read the virtual file mapping. llvm::SmallVector<unsigned, 8> VirtualFileMapping; uint64_t NumFileMappings; if (auto Err = readSize(NumFileMappings)) return Err; for (size_t I = 0; I < NumFileMappings; ++I) { uint64_t FilenameIndex; if (auto Err = readIntMax(FilenameIndex, TranslationUnitFilenames.size())) return Err; VirtualFileMapping.push_back(FilenameIndex); } // Construct the files using unique filenames and virtual file mapping. for (auto I : VirtualFileMapping) { Filenames.push_back(TranslationUnitFilenames[I]); } // Read the expressions. uint64_t NumExpressions; if (auto Err = readSize(NumExpressions)) return Err; // Create an array of dummy expressions that get the proper counters // when the expressions are read, and the proper kinds when the counters // are decoded. Expressions.resize( NumExpressions, CounterExpression(CounterExpression::Subtract, Counter(), Counter())); for (size_t I = 0; I < NumExpressions; ++I) { if (auto Err = readCounter(Expressions[I].LHS)) return Err; if (auto Err = readCounter(Expressions[I].RHS)) return Err; } // Read the mapping regions sub-arrays. for (unsigned InferredFileID = 0, S = VirtualFileMapping.size(); InferredFileID < S; ++InferredFileID) { if (auto Err = readMappingRegionsSubArray(MappingRegions, InferredFileID, VirtualFileMapping.size())) return Err; } // Set the counters for the expansion regions. // i.e. Counter of expansion region = counter of the first region // from the expanded file. // Perform multiple passes to correctly propagate the counters through // all the nested expansion regions. 
SmallVector<CounterMappingRegion *, 8> FileIDExpansionRegionMapping; FileIDExpansionRegionMapping.resize(VirtualFileMapping.size(), nullptr); for (unsigned Pass = 1, S = VirtualFileMapping.size(); Pass < S; ++Pass) { for (auto &R : MappingRegions) { if (R.Kind != CounterMappingRegion::ExpansionRegion) continue; assert(!FileIDExpansionRegionMapping[R.ExpandedFileID]); FileIDExpansionRegionMapping[R.ExpandedFileID] = &R; } for (auto &R : MappingRegions) { if (FileIDExpansionRegionMapping[R.FileID]) { FileIDExpansionRegionMapping[R.FileID]->Count = R.Count; FileIDExpansionRegionMapping[R.FileID] = nullptr; } } } return std::error_code(); } namespace { /// \brief A helper structure to access the data from a section /// in an object file. struct SectionData { StringRef Data; uint64_t Address; std::error_code load(SectionRef &Section) { if (auto Err = Section.getContents(Data)) return Err; Address = Section.getAddress(); return std::error_code(); } std::error_code get(uint64_t Pointer, size_t Size, StringRef &Result) { if (Pointer < Address) return coveragemap_error::malformed; auto Offset = Pointer - Address; if (Offset + Size > Data.size()) return coveragemap_error::malformed; Result = Data.substr(Pointer - Address, Size); return std::error_code(); } }; } template <typename T, support::endianness Endian> std::error_code readCoverageMappingData( SectionData &ProfileNames, StringRef Data, std::vector<BinaryCoverageReader::ProfileMappingRecord> &Records, std::vector<StringRef> &Filenames) { using namespace support; llvm::DenseSet<T> UniqueFunctionMappingData; // Read the records in the coverage data section. for (const char *Buf = Data.data(), *End = Buf + Data.size(); Buf < End;) { if (Buf + 4 * sizeof(uint32_t) > End) return coveragemap_error::malformed; uint32_t NRecords = endian::readNext<uint32_t, Endian, unaligned>(Buf); uint32_t FilenamesSize = endian::readNext<uint32_t, Endian, unaligned>(Buf); uint32_t CoverageSize = endian::readNext<uint32_t, Endian, unaligned>(Buf); uint32_t Version = endian::readNext<uint32_t, Endian, unaligned>(Buf); switch (Version) { case CoverageMappingVersion1: break; default: return coveragemap_error::unsupported_version; } // Skip past the function records, saving the start and end for later. const char *FunBuf = Buf; Buf += NRecords * (sizeof(T) + 2 * sizeof(uint32_t) + sizeof(uint64_t)); const char *FunEnd = Buf; // Get the filenames. if (Buf + FilenamesSize > End) return coveragemap_error::malformed; size_t FilenamesBegin = Filenames.size(); RawCoverageFilenamesReader Reader(StringRef(Buf, FilenamesSize), Filenames); if (auto Err = Reader.read()) return Err; Buf += FilenamesSize; // We'll read the coverage mapping records in the loop below. const char *CovBuf = Buf; Buf += CoverageSize; const char *CovEnd = Buf; if (Buf > End) return coveragemap_error::malformed; // Each coverage map has an alignment of 8, so we need to adjust alignment // before reading the next map. Buf += alignmentAdjustment(Buf, 8); while (FunBuf < FunEnd) { // Read the function information T NamePtr = endian::readNext<T, Endian, unaligned>(FunBuf); uint32_t NameSize = endian::readNext<uint32_t, Endian, unaligned>(FunBuf); uint32_t DataSize = endian::readNext<uint32_t, Endian, unaligned>(FunBuf); uint64_t FuncHash = endian::readNext<uint64_t, Endian, unaligned>(FunBuf); // Now use that to read the coverage data. 
if (CovBuf + DataSize > CovEnd) return coveragemap_error::malformed; auto Mapping = StringRef(CovBuf, DataSize); CovBuf += DataSize; // Ignore this record if we already have a record that points to the same // function name. This is useful to ignore the redundant records for the // functions with ODR linkage. if (!UniqueFunctionMappingData.insert(NamePtr).second) continue; // Finally, grab the name and create a record. StringRef FuncName; if (std::error_code EC = ProfileNames.get(NamePtr, NameSize, FuncName)) return EC; Records.push_back(BinaryCoverageReader::ProfileMappingRecord( CoverageMappingVersion(Version), FuncName, FuncHash, Mapping, FilenamesBegin, Filenames.size() - FilenamesBegin)); } } return std::error_code(); } static const char *TestingFormatMagic = "llvmcovmtestdata"; static std::error_code loadTestingFormat(StringRef Data, SectionData &ProfileNames, StringRef &CoverageMapping, uint8_t &BytesInAddress, support::endianness &Endian) { BytesInAddress = 8; Endian = support::endianness::little; Data = Data.substr(StringRef(TestingFormatMagic).size()); if (Data.size() < 1) return coveragemap_error::truncated; unsigned N = 0; auto ProfileNamesSize = decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); if (N > Data.size()) return coveragemap_error::malformed; Data = Data.substr(N); if (Data.size() < 1) return coveragemap_error::truncated; N = 0; ProfileNames.Address = decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); if (N > Data.size()) return coveragemap_error::malformed; Data = Data.substr(N); if (Data.size() < ProfileNamesSize) return coveragemap_error::malformed; ProfileNames.Data = Data.substr(0, ProfileNamesSize); CoverageMapping = Data.substr(ProfileNamesSize); return std::error_code(); } #if 0 // HLSL Change Starts - remove support for object files static ErrorOr<SectionRef> lookupSection(ObjectFile &OF, StringRef Name) { StringRef FoundName; for (const auto &Section : OF.sections()) { if (auto EC = Section.getName(FoundName)) return EC; if (FoundName == Name) return Section; } return coveragemap_error::no_data_found; } #endif // HLSL Change Ends - remove support for object files static std::error_code loadBinaryFormat(MemoryBufferRef ObjectBuffer, SectionData &ProfileNames, StringRef &CoverageMapping, uint8_t &BytesInAddress, support::endianness &Endian, StringRef Arch) { #if 1 // HLSL Change Starts - remove support for object files return std::error_code(); #else auto BinOrErr = object::createBinary(ObjectBuffer); if (std::error_code EC = BinOrErr.getError()) return EC; auto Bin = std::move(BinOrErr.get()); std::unique_ptr<ObjectFile> OF; if (auto *Universal = dyn_cast<object::MachOUniversalBinary>(Bin.get())) { // If we have a universal binary, try to look up the object for the // appropriate architecture. auto ObjectFileOrErr = Universal->getObjectForArch(Arch); if (std::error_code EC = ObjectFileOrErr.getError()) return EC; OF = std::move(ObjectFileOrErr.get()); } else if (isa<object::ObjectFile>(Bin.get())) { // For any other object file, upcast and take ownership. OF.reset(cast<object::ObjectFile>(Bin.release())); // If we've asked for a particular arch, make sure they match. if (!Arch.empty() && OF->getArch() != Triple(Arch).getArch()) return object_error::arch_not_found; } else // We can only handle object files. return coveragemap_error::malformed; // The coverage uses native pointer sizes for the object it's written in. BytesInAddress = OF->getBytesInAddress(); Endian = OF->isLittleEndian() ? 
support::endianness::little : support::endianness::big; // Look for the sections that we are interested in. auto NamesSection = lookupSection(*OF, "__llvm_prf_names"); if (auto EC = NamesSection.getError()) return EC; auto CoverageSection = lookupSection(*OF, "__llvm_covmap"); if (auto EC = CoverageSection.getError()) return EC; // Get the contents of the given sections. if (std::error_code EC = CoverageSection->getContents(CoverageMapping)) return EC; if (std::error_code EC = ProfileNames.load(*NamesSection)) return EC; return std::error_code(); #endif // HLSL Change Ends - remove support for object files } ErrorOr<std::unique_ptr<BinaryCoverageReader>> BinaryCoverageReader::create(std::unique_ptr<MemoryBuffer> &ObjectBuffer, StringRef Arch) { std::unique_ptr<BinaryCoverageReader> Reader(new BinaryCoverageReader()); SectionData Profile; StringRef Coverage; uint8_t BytesInAddress; support::endianness Endian; std::error_code EC; if (ObjectBuffer->getBuffer().startswith(TestingFormatMagic)) // This is a special format used for testing. EC = loadTestingFormat(ObjectBuffer->getBuffer(), Profile, Coverage, BytesInAddress, Endian); else EC = loadBinaryFormat(ObjectBuffer->getMemBufferRef(), Profile, Coverage, BytesInAddress, Endian, Arch); if (EC) return EC; if (BytesInAddress == 4 && Endian == support::endianness::little) EC = readCoverageMappingData<uint32_t, support::endianness::little>( Profile, Coverage, Reader->MappingRecords, Reader->Filenames); else if (BytesInAddress == 4 && Endian == support::endianness::big) EC = readCoverageMappingData<uint32_t, support::endianness::big>( Profile, Coverage, Reader->MappingRecords, Reader->Filenames); else if (BytesInAddress == 8 && Endian == support::endianness::little) EC = readCoverageMappingData<uint64_t, support::endianness::little>( Profile, Coverage, Reader->MappingRecords, Reader->Filenames); else if (BytesInAddress == 8 && Endian == support::endianness::big) EC = readCoverageMappingData<uint64_t, support::endianness::big>( Profile, Coverage, Reader->MappingRecords, Reader->Filenames); else return coveragemap_error::malformed; if (EC) return EC; return std::move(Reader); } std::error_code BinaryCoverageReader::readNextRecord(CoverageMappingRecord &Record) { if (CurrentRecord >= MappingRecords.size()) return coveragemap_error::eof; FunctionsFilenames.clear(); Expressions.clear(); MappingRegions.clear(); auto &R = MappingRecords[CurrentRecord]; RawCoverageMappingReader Reader( R.CoverageMapping, makeArrayRef(Filenames).slice(R.FilenamesBegin, R.FilenamesSize), FunctionsFilenames, Expressions, MappingRegions); if (auto Err = Reader.read()) return Err; Record.FunctionName = R.FunctionName; Record.FunctionHash = R.FunctionHash; Record.Filenames = FunctionsFilenames; Record.Expressions = Expressions; Record.MappingRegions = MappingRegions; ++CurrentRecord; return std::error_code(); }
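For reference, the "llvmcovmtestdata" testing container that loadTestingFormat above consumes is laid out as follows (a summary added here, not text from the original file):

// "llvmcovmtestdata"               16-byte magic, no terminator
// ULEB128: size of the profile-names payload
// ULEB128: address recorded for the names section (SectionData::Address)
// <profile-names payload bytes>
// <raw coverage mapping data, passed on to readCoverageMappingData>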
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/ProfileData/SampleProfReader.cpp
//===- SampleProfReader.cpp - Read LLVM sample profile data ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the class that reads LLVM sample profiles. It
// supports two file formats: text and binary. The textual representation
// is useful for debugging and testing purposes. The binary representation
// is more compact, resulting in smaller file sizes. However, they can
// both be used interchangeably.
//
// NOTE: If you are making changes to the file format, please remember
// to document them in the Clang documentation at
// tools/clang/docs/UsersManual.rst.
//
// Text format
// -----------
//
// Sample profiles are written as ASCII text. The file is divided into
// sections, which correspond to each of the functions executed at runtime.
// Each section has the following format:
//
//     function1:total_samples:total_head_samples
//     offset1[.discriminator]: number_of_samples [fn1:num fn2:num ... ]
//     offset2[.discriminator]: number_of_samples [fn3:num fn4:num ... ]
//     ...
//     offsetN[.discriminator]: number_of_samples [fn5:num fn6:num ... ]
//
// The file may contain blank lines between sections and within a
// section. However, the spacing within a single line is fixed. Additional
// spaces will result in an error while reading the file.
//
// Function names must be mangled in order for the profile loader to
// match them in the current translation unit. The two numbers in the
// function header specify how many total samples were accumulated in the
// function (first number), and the total number of samples accumulated
// in the prologue of the function (second number). This head sample
// count provides an indicator of how frequently the function is invoked.
//
// Each sampled line may contain several items. Some are optional (marked
// below):
//
// a. Source line offset. This number represents the line number
//    in the function where the sample was collected. The line number is
//    always relative to the line where the symbol of the function is
//    defined. So, if the function has its header at line 280, the offset
//    13 is at line 293 in the file.
//
//    Note that this offset should never be a negative number. This could
//    happen in cases like macros. The debug machinery will register the
//    line number at the point of macro expansion. So, if the macro was
//    expanded in a line before the start of the function, the profile
//    converter should emit a 0 as the offset (this means that the optimizers
//    will not be able to associate a meaningful weight to the instructions
//    in the macro).
//
// b. [OPTIONAL] Discriminator. This is used if the sampled program
//    was compiled with DWARF discriminator support
//    (http://wiki.dwarfstd.org/index.php?title=Path_Discriminators).
//    DWARF discriminators are unsigned integer values that allow the
//    compiler to distinguish between multiple execution paths on the
//    same source line location.
//
//    For example, consider the line of code ``if (cond) foo(); else bar();``.
//    If the predicate ``cond`` is true 80% of the time, then the edge
//    into function ``foo`` should be considered to be taken most of the
//    time. But both calls to ``foo`` and ``bar`` are at the same source
//    line, so a sample count at that line is not sufficient. The
//    compiler needs to know which part of that line is taken more
//    frequently.
//
//    This is what discriminators provide. In this case, the calls to
//    ``foo`` and ``bar`` will be at the same line, but will have
//    different discriminator values. This allows the compiler to correctly
//    set edge weights into ``foo`` and ``bar``.
//
// c. Number of samples. This is an integer quantity representing the
//    number of samples collected by the profiler at this source
//    location.
//
// d. [OPTIONAL] Potential call targets and samples. If present, this
//    line contains a call instruction. This models both direct and
//    indirect calls. Each called target is listed together with the
//    number of samples. For example,
//
//    130: 7  foo:3  bar:2  baz:7
//
//    The above means that at relative line offset 130 there is a call
//    instruction that calls one of ``foo()``, ``bar()`` and ``baz()``,
//    with ``baz()`` being the relatively more frequently called target.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"

using namespace llvm::sampleprof;
using namespace llvm;

/// \brief Print the samples collected for a function on stream \p OS.
///
/// \param OS Stream to emit the output to.
void FunctionSamples::print(raw_ostream &OS) {
  OS << TotalSamples << ", " << TotalHeadSamples << ", " << BodySamples.size()
     << " sampled lines\n";
  for (const auto &SI : BodySamples) {
    LineLocation Loc = SI.first;
    const SampleRecord &Sample = SI.second;
    OS << "\tline offset: " << Loc.LineOffset
       << ", discriminator: " << Loc.Discriminator
       << ", number of samples: " << Sample.getSamples();
    if (Sample.hasCalls()) {
      OS << ", calls:";
      for (const auto &I : Sample.getCallTargets())
        OS << " " << I.first() << ":" << I.second;
    }
    OS << "\n";
  }
  OS << "\n";
}

/// \brief Dump the function profile for \p FName.
///
/// \param FName Name of the function to print.
/// \param OS Stream to emit the output to.
void SampleProfileReader::dumpFunctionProfile(StringRef FName,
                                              raw_ostream &OS) {
  OS << "Function: " << FName << ": ";
  Profiles[FName].print(OS);
}

/// \brief Dump all the function profiles found on stream \p OS.
void SampleProfileReader::dump(raw_ostream &OS) {
  for (const auto &I : Profiles)
    dumpFunctionProfile(I.getKey(), OS);
}

/// \brief Load samples from a text file.
///
/// See the documentation at the top of the file for an explanation of
/// the expected format.
///
/// \returns sampleprof_error::success if the file was loaded successfully,
/// an error code otherwise.
std::error_code SampleProfileReaderText::read() {
  line_iterator LineIt(*Buffer, /*SkipBlanks=*/true, '#');

  // Read the profile of each function. Since each function may be
  // mentioned more than once, and we are collecting flat profiles,
  // accumulate samples as we parse them.
  Regex HeadRE("^([^0-9].*):([0-9]+):([0-9]+)$");
  Regex LineSampleRE("^([0-9]+)\\.?([0-9]+)?: ([0-9]+)(.*)$");
  Regex CallSampleRE(" +([^0-9 ][^ ]*):([0-9]+)");
  while (!LineIt.is_at_eof()) {
    // Read the header of each function.
    //
    // Note that for function identifiers we are actually expecting
    // mangled names, but we may not always get them. This happens when
    // the compiler decides not to emit the function (e.g., it was inlined
    // and removed). In this case, the binary will not have the linkage
    // name for the function, so the profiler will emit the function's
    // unmangled name, which may contain characters like ':' and '>' in its
    // name (member functions, templates, etc).
// // The only requirement we place on the identifier, then, is that it // should not begin with a number. SmallVector<StringRef, 4> Matches; if (!HeadRE.match(*LineIt, &Matches)) { reportParseError(LineIt.line_number(), "Expected 'mangled_name:NUM:NUM', found " + *LineIt); return sampleprof_error::malformed; } assert(Matches.size() == 4); StringRef FName = Matches[1]; unsigned NumSamples, NumHeadSamples; Matches[2].getAsInteger(10, NumSamples); Matches[3].getAsInteger(10, NumHeadSamples); Profiles[FName] = FunctionSamples(); FunctionSamples &FProfile = Profiles[FName]; FProfile.addTotalSamples(NumSamples); FProfile.addHeadSamples(NumHeadSamples); ++LineIt; // Now read the body. The body of the function ends when we reach // EOF or when we see the start of the next function. while (!LineIt.is_at_eof() && isdigit((*LineIt)[0])) { if (!LineSampleRE.match(*LineIt, &Matches)) { reportParseError( LineIt.line_number(), "Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found " + *LineIt); return sampleprof_error::malformed; } assert(Matches.size() == 5); unsigned LineOffset, NumSamples, Discriminator = 0; Matches[1].getAsInteger(10, LineOffset); if (Matches[2] != "") Matches[2].getAsInteger(10, Discriminator); Matches[3].getAsInteger(10, NumSamples); // If there are function calls in this line, generate a call sample // entry for each call. std::string CallsLine(Matches[4]); while (CallsLine != "") { SmallVector<StringRef, 3> CallSample; if (!CallSampleRE.match(CallsLine, &CallSample)) { reportParseError(LineIt.line_number(), "Expected 'mangled_name:NUM', found " + CallsLine); return sampleprof_error::malformed; } StringRef CalledFunction = CallSample[1]; unsigned CalledFunctionSamples; CallSample[2].getAsInteger(10, CalledFunctionSamples); FProfile.addCalledTargetSamples(LineOffset, Discriminator, CalledFunction, CalledFunctionSamples); CallsLine = CallSampleRE.sub("", CallsLine); } FProfile.addBodySamples(LineOffset, Discriminator, NumSamples); ++LineIt; } } return sampleprof_error::success; } template <typename T> ErrorOr<T> SampleProfileReaderBinary::readNumber() { unsigned NumBytesRead = 0; std::error_code EC; uint64_t Val = decodeULEB128(Data, &NumBytesRead); if (Val > std::numeric_limits<T>::max()) EC = sampleprof_error::malformed; else if (Data + NumBytesRead > End) EC = sampleprof_error::truncated; else EC = sampleprof_error::success; if (EC) { reportParseError(0, EC.message()); return EC; } Data += NumBytesRead; return static_cast<T>(Val); } ErrorOr<StringRef> SampleProfileReaderBinary::readString() { std::error_code EC; StringRef Str(reinterpret_cast<const char *>(Data)); if (Data + Str.size() + 1 > End) { EC = sampleprof_error::truncated; reportParseError(0, EC.message()); return EC; } Data += Str.size() + 1; return Str; } std::error_code SampleProfileReaderBinary::read() { while (!at_eof()) { auto FName(readString()); if (std::error_code EC = FName.getError()) return EC; Profiles[*FName] = FunctionSamples(); FunctionSamples &FProfile = Profiles[*FName]; auto Val = readNumber<unsigned>(); if (std::error_code EC = Val.getError()) return EC; FProfile.addTotalSamples(*Val); Val = readNumber<unsigned>(); if (std::error_code EC = Val.getError()) return EC; FProfile.addHeadSamples(*Val); // Read the samples in the body. 
auto NumRecords = readNumber<unsigned>(); if (std::error_code EC = NumRecords.getError()) return EC; for (unsigned I = 0; I < *NumRecords; ++I) { auto LineOffset = readNumber<uint64_t>(); if (std::error_code EC = LineOffset.getError()) return EC; auto Discriminator = readNumber<uint64_t>(); if (std::error_code EC = Discriminator.getError()) return EC; auto NumSamples = readNumber<uint64_t>(); if (std::error_code EC = NumSamples.getError()) return EC; auto NumCalls = readNumber<unsigned>(); if (std::error_code EC = NumCalls.getError()) return EC; for (unsigned J = 0; J < *NumCalls; ++J) { auto CalledFunction(readString()); if (std::error_code EC = CalledFunction.getError()) return EC; auto CalledFunctionSamples = readNumber<uint64_t>(); if (std::error_code EC = CalledFunctionSamples.getError()) return EC; FProfile.addCalledTargetSamples(*LineOffset, *Discriminator, *CalledFunction, *CalledFunctionSamples); } FProfile.addBodySamples(*LineOffset, *Discriminator, *NumSamples); } } return sampleprof_error::success; } std::error_code SampleProfileReaderBinary::readHeader() { Data = reinterpret_cast<const uint8_t *>(Buffer->getBufferStart()); End = Data + Buffer->getBufferSize(); // Read and check the magic identifier. auto Magic = readNumber<uint64_t>(); if (std::error_code EC = Magic.getError()) return EC; else if (*Magic != SPMagic()) return sampleprof_error::bad_magic; // Read the version number. auto Version = readNumber<uint64_t>(); if (std::error_code EC = Version.getError()) return EC; else if (*Version != SPVersion()) return sampleprof_error::unsupported_version; return sampleprof_error::success; } bool SampleProfileReaderBinary::hasFormat(const MemoryBuffer &Buffer) { const uint8_t *Data = reinterpret_cast<const uint8_t *>(Buffer.getBufferStart()); uint64_t Magic = decodeULEB128(Data); return Magic == SPMagic(); } /// \brief Prepare a memory buffer for the contents of \p Filename. /// /// \returns an error code indicating the status of the buffer. static ErrorOr<std::unique_ptr<MemoryBuffer>> setupMemoryBuffer(std::string Filename) { auto BufferOrErr = MemoryBuffer::getFileOrSTDIN(Filename); if (std::error_code EC = BufferOrErr.getError()) return EC; auto Buffer = std::move(BufferOrErr.get()); // Sanity check the file. if (Buffer->getBufferSize() > std::numeric_limits<unsigned>::max()) return sampleprof_error::too_large; return std::move(Buffer); } /// \brief Create a sample profile reader based on the format of the input file. /// /// \param Filename The file to open. /// /// \param Reader The reader to instantiate according to \p Filename's format. /// /// \param C The LLVM context to use to emit diagnostics. /// /// \returns an error code indicating the status of the created reader. ErrorOr<std::unique_ptr<SampleProfileReader>> SampleProfileReader::create(StringRef Filename, LLVMContext &C) { auto BufferOrError = setupMemoryBuffer(Filename); if (std::error_code EC = BufferOrError.getError()) return EC; auto Buffer = std::move(BufferOrError.get()); std::unique_ptr<SampleProfileReader> Reader; if (SampleProfileReaderBinary::hasFormat(*Buffer)) Reader.reset(new SampleProfileReaderBinary(std::move(Buffer), C)); else Reader.reset(new SampleProfileReaderText(std::move(Buffer), C)); if (std::error_code EC = Reader->readHeader()) return EC; return std::move(Reader); }
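// A minimal usage sketch (the caller and the file name "profile.txt" are
// hypothetical). Given a text profile such as:
//
//   main:184019:0
//   1: 534
//   6: 2080 _Z3fooi:1471
//
// create() autodetects text vs. binary via
// SampleProfileReaderBinary::hasFormat() and validates the header; read()
// then populates the Profiles map, which dump() prints.
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

static void readSampleProfileSketch() {
  llvm::LLVMContext Ctx;
  auto ReaderOrErr = SampleProfileReader::create("profile.txt", Ctx);
  if (std::error_code EC = ReaderOrErr.getError())
    return; // report EC.message()
  std::unique_ptr<SampleProfileReader> Reader = std::move(ReaderOrErr.get());
  if (!Reader->read()) // a false-y std::error_code means success
    Reader->dump(llvm::errs());
}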
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Process.cpp
//===-- Process.cpp - Implement OS Process Concept --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the operating system Process concept. // //===----------------------------------------------------------------------===// #include "llvm/ADT/StringExtras.h" #include "llvm/Config/config.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Path.h" #include "llvm/Support/Process.h" #include "llvm/Support/Program.h" using namespace llvm; using namespace sys; //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only TRULY operating system //=== independent code. //===----------------------------------------------------------------------===// Optional<std::string> Process::FindInEnvPath(const std::string& EnvName, const std::string& FileName) { assert(!path::is_absolute(FileName)); Optional<std::string> FoundPath; Optional<std::string> OptPath = Process::GetEnv(EnvName); if (!OptPath.hasValue()) return FoundPath; const char EnvPathSeparatorStr[] = {EnvPathSeparator, '\0'}; SmallVector<StringRef, 8> Dirs; SplitString(OptPath.getValue(), Dirs, EnvPathSeparatorStr); for (const auto &Dir : Dirs) { if (Dir.empty()) continue; SmallString<128> FilePath(Dir); path::append(FilePath, FileName); if (fs::exists(Twine(FilePath))) { FoundPath = FilePath.str(); break; } } return FoundPath; } #define COLOR(FGBG, CODE, BOLD) "\033[0;" BOLD FGBG CODE "m" #define ALLCOLORS(FGBG,BOLD) {\ COLOR(FGBG, "0", BOLD),\ COLOR(FGBG, "1", BOLD),\ COLOR(FGBG, "2", BOLD),\ COLOR(FGBG, "3", BOLD),\ COLOR(FGBG, "4", BOLD),\ COLOR(FGBG, "5", BOLD),\ COLOR(FGBG, "6", BOLD),\ COLOR(FGBG, "7", BOLD)\ } [[maybe_unused]] static const char colorcodes[2][2][8][10] = { { ALLCOLORS("3",""), ALLCOLORS("3","1;") }, { ALLCOLORS("4",""), ALLCOLORS("4","1;") } }; // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Process.inc" #endif #ifdef LLVM_ON_WIN32 #include "Windows/Process.inc" #endif
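// A minimal usage sketch (the environment variable and file name are
// hypothetical inputs): FindInEnvPath() splits the variable on the platform
// path separator and returns the first directory containing the relative
// file, or None when the variable is unset or nothing matches.
#include "llvm/Support/raw_ostream.h"

static void findInEnvPathSketch() {
  if (Optional<std::string> Found =
          Process::FindInEnvPath("PATH", "clang-format"))
    outs() << "found: " << *Found << "\n";
}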
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/ARMWinEH.cpp
//===-- ARMWinEH.cpp - Windows on ARM EH Support Functions ------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Support/ARMWinEH.h" #include "llvm/Support/raw_ostream.h" namespace llvm { namespace ARM { namespace WinEH { std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF) { uint8_t NumRegisters = RF.Reg(); uint8_t RegistersVFP = RF.R(); uint8_t LinkRegister = RF.L(); uint8_t ChainedFrame = RF.C(); uint16_t GPRMask = (ChainedFrame << 11) | (LinkRegister << 14); uint32_t VFPMask = 0; if (RegistersVFP) VFPMask |= (((1 << ((NumRegisters + 1) % 8)) - 1) << 8); else GPRMask |= (((1 << (NumRegisters + 1)) - 1) << 4); if (PrologueFolding(RF)) GPRMask |= (((1 << (NumRegisters + 1)) - 1) << (~RF.StackAdjust() & 0x3)); return std::make_pair(GPRMask, VFPMask); } } } }
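// Worked example (a sketch, not part of the implementation): for a prologue
// equivalent to "push {r4-r7, lr}" the packed .pdata fields would be
// Reg() == 3 (three registers beyond r4, i.e. r4-r7), R() == 0 (integer
// registers), L() == 1 (LR saved) and C() == 0 (no chained frame). Assuming
// PrologueFolding(RF) is false, SavedRegisterMask() above computes
//
//   GPRMask = (0 << 11) | (1 << 14) | (((1 << (3 + 1)) - 1) << 4) = 0x40f0
//
// i.e. bits 4-7 (r4-r7) plus bit 14 (LR), with VFPMask == 0.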
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Options.cpp
//===- llvm/Support/Options.cpp - Debug options support ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the helper objects for defining debug options using the
// new API built on cl::opt, but not requiring the use of static globals.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/Options.h"
#include "llvm/Support/ManagedStatic.h"

using namespace llvm;

OptionRegistry::~OptionRegistry() {
  for (auto IT = Options.begin(); IT != Options.end(); ++IT)
    delete IT->second;
}

void OptionRegistry::addOption(void *Key, cl::Option *O) {
  assert(Options.find(Key) == Options.end() &&
         "Argument with this key already registered");
  Options.insert(std::make_pair(Key, O));
}

static ManagedStatic<OptionRegistry> OR;

OptionRegistry &OptionRegistry::instance() { return *OR; }
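// A minimal sketch of registering an option directly (the key object and
// option name are hypothetical). Callers normally go through the templated
// OptionRegistry::registerOption() helper declared in llvm/Support/Options.h;
// either way the registry takes ownership and deletes the cl::Option in its
// destructor, as shown above.
static char SketchOptionKey;

static void registerSketchOption() {
  auto *O = new cl::opt<bool>("sketch-flag",
                              cl::desc("hypothetical debug flag"),
                              cl::init(false));
  OptionRegistry::instance().addOption(&SketchOptionKey, O);
}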
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regex2.h
/*- * This code is derived from OpenBSD's libc/regex, original license follows: * * Copyright (c) 1992, 1993, 1994 Henry Spencer. * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Henry Spencer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)regex2.h 8.4 (Berkeley) 3/20/94 */ #ifndef LLVM_SUPPORT_REGEX2_H #define LLVM_SUPPORT_REGEX2_H #include "regutils.h" #include <stddef.h> /* * internals of regex_t */ #define MAGIC1 ((('r'^0200)<<8) | 'e') /* * The internal representation is a *strip*, a sequence of * operators ending with an endmarker. (Some terminology etc. is a * historical relic of earlier versions which used multiple strips.) * Certain oddities in the representation are there to permit running * the machinery backwards; in particular, any deviation from sequential * flow must be marked at both its source and its destination. Some * fine points: * * - OPLUS_ and O_PLUS are *inside* the loop they create. * - OQUEST_ and O_QUEST are *outside* the bypass they create. * - OCH_ and O_CH are *outside* the multi-way branch they create, while * OOR1 and OOR2 are respectively the end and the beginning of one of * the branches. Note that there is an implicit OOR2 following OCH_ * and an implicit OOR1 preceding O_CH. * * In state representations, an operator's bit is on to signify a state * immediately *preceding* "execution" of that operator. */ typedef unsigned long sop; /* strip operator */ typedef long sopno; #define OPRMASK 0xf8000000LU #define OPDMASK 0x07ffffffLU #define OPSHIFT ((unsigned)27) #define OP(n) ((n)&OPRMASK) #define OPND(n) ((n)&OPDMASK) #define SOP(op, opnd) ((op)|(opnd)) /* operators meaning operand */ /* (back, fwd are offsets) */ #define OEND (1LU<<OPSHIFT) /* endmarker - */ #define OCHAR (2LU<<OPSHIFT) /* character unsigned char */ #define OBOL (3LU<<OPSHIFT) /* left anchor - */ #define OEOL (4LU<<OPSHIFT) /* right anchor - */ #define OANY (5LU<<OPSHIFT) /* . - */ #define OANYOF (6LU<<OPSHIFT) /* [...] 
set number */ #define OBACK_ (7LU<<OPSHIFT) /* begin \d paren number */ #define O_BACK (8LU<<OPSHIFT) /* end \d paren number */ #define OPLUS_ (9LU<<OPSHIFT) /* + prefix fwd to suffix */ #define O_PLUS (10LU<<OPSHIFT) /* + suffix back to prefix */ #define OQUEST_ (11LU<<OPSHIFT) /* ? prefix fwd to suffix */ #define O_QUEST (12LU<<OPSHIFT) /* ? suffix back to prefix */ #define OLPAREN (13LU<<OPSHIFT) /* ( fwd to ) */ #define ORPAREN (14LU<<OPSHIFT) /* ) back to ( */ #define OCH_ (15LU<<OPSHIFT) /* begin choice fwd to OOR2 */ #define OOR1 (16LU<<OPSHIFT) /* | pt. 1 back to OOR1 or OCH_ */ #define OOR2 (17LU<<OPSHIFT) /* | pt. 2 fwd to OOR2 or O_CH */ #define O_CH (18LU<<OPSHIFT) /* end choice back to OOR1 */ #define OBOW (19LU<<OPSHIFT) /* begin word - */ #define OEOW (20LU<<OPSHIFT) /* end word - */ /* * Structure for [] character-set representation. Character sets are * done as bit vectors, grouped 8 to a byte vector for compactness. * The individual set therefore has both a pointer to the byte vector * and a mask to pick out the relevant bit of each byte. A hash code * simplifies testing whether two sets could be identical. * * This will get trickier for multicharacter collating elements. As * preliminary hooks for dealing with such things, we also carry along * a string of multi-character elements, and decide the size of the * vectors at run time. */ typedef struct { uch *ptr; /* -> uch [csetsize] */ uch mask; /* bit within array */ uch hash; /* hash code */ size_t smultis; char *multis; /* -> char[smulti] ab\0cd\0ef\0\0 */ } cset; /* note that CHadd and CHsub are unsafe, and CHIN doesn't yield 0/1 */ #define CHadd(cs, c) ((cs)->ptr[(uch)(c)] |= (cs)->mask, (cs)->hash += (c)) #define CHsub(cs, c) ((cs)->ptr[(uch)(c)] &= ~(cs)->mask, (cs)->hash -= (c)) #define CHIN(cs, c) ((cs)->ptr[(uch)(c)] & (cs)->mask) #define MCadd(p, cs, cp) mcadd(p, cs, cp) /* llvm_regcomp() internal fns */ #define MCsub(p, cs, cp) mcsub(p, cs, cp) #define MCin(p, cs, cp) mcin(p, cs, cp) /* stuff for character categories */ typedef unsigned char cat_t; /* * main compiled-expression structure */ struct re_guts { int magic; # define MAGIC2 ((('R'^0200)<<8)|'E') sop *strip; /* malloced area for strip */ int csetsize; /* number of bits in a cset vector */ int ncsets; /* number of csets in use */ cset *sets; /* -> cset [ncsets] */ uch *setbits; /* -> uch[csetsize][ncsets/CHAR_BIT] */ int cflags; /* copy of llvm_regcomp() cflags argument */ sopno nstates; /* = number of sops */ sopno firststate; /* the initial OEND (normally 0) */ sopno laststate; /* the final OEND */ int iflags; /* internal flags */ # define USEBOL 01 /* used ^ */ # define USEEOL 02 /* used $ */ # define REGEX_BAD 04 /* something wrong */ int nbol; /* number of ^ used */ int neol; /* number of $ used */ int ncategories; /* how many character categories */ cat_t *categories; /* ->catspace[-CHAR_MIN] */ char *must; /* match must contain this string */ int mlen; /* length of must */ size_t nsub; /* copy of re_nsub */ int backrefs; /* does it use back references? */ sopno nplus; /* how deep does it nest +s? */ /* catspace must be last */ cat_t catspace[1]; /* actually [NC] */ }; /* misc utilities */ #define OUT (CHAR_MAX+1) /* a non-character value */ #define ISWORD(c) (isalnum(c&0xff) || (c) == '_') #endif
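/*
 * A small worked example (illustration only, not part of the header): the
 * macros above pack an operator into the top five bits of a sop and an
 * operand into the low 27 bits, so
 *
 *	SOP(OCHAR, 'a')			encodes "match the character 'a'",
 *	OP(SOP(OCHAR, 'a')) == OCHAR	recovers the operator, and
 *	OPND(SOP(OCHAR, 'a')) == 'a'	recovers the operand.
 */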
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regerror.c
/*- * This code is derived from OpenBSD's libc/regex, original license follows: * * Copyright (c) 1992, 1993, 1994 Henry Spencer. * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Henry Spencer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)regerror.c 8.4 (Berkeley) 3/20/94 */ #include <sys/types.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <limits.h> #include <stdlib.h> #include "regex_impl.h" #include "regutils.h" #include "dxc/WinAdapter.h" // HLSL Change #ifdef _MSC_VER #define snprintf _snprintf #endif static const char *regatoi( const llvm_regex_t *preg, char *localbuf, int localbufsize ); static struct rerr { int code; const char *name; const char *explain; } rerrs[] = { { REG_NOMATCH, "REG_NOMATCH", "llvm_regexec() failed to match" }, { REG_BADPAT, "REG_BADPAT", "invalid regular expression" }, { REG_ECOLLATE, "REG_ECOLLATE", "invalid collating element" }, { REG_ECTYPE, "REG_ECTYPE", "invalid character class" }, { REG_EESCAPE, "REG_EESCAPE", "trailing backslash (\\)" }, { REG_ESUBREG, "REG_ESUBREG", "invalid backreference number" }, { REG_EBRACK, "REG_EBRACK", "brackets ([ ]) not balanced" }, { REG_EPAREN, "REG_EPAREN", "parentheses not balanced" }, { REG_EBRACE, "REG_EBRACE", "braces not balanced" }, { REG_BADBR, "REG_BADBR", "invalid repetition count(s)" }, { REG_ERANGE, "REG_ERANGE", "invalid character range" }, { REG_ESPACE, "REG_ESPACE", "out of memory" }, { REG_BADRPT, "REG_BADRPT", "repetition-operator operand invalid" }, { REG_EMPTY, "REG_EMPTY", "empty (sub)expression" }, { REG_ASSERT, "REG_ASSERT", "\"can't happen\" -- you found a bug" }, { REG_INVARG, "REG_INVARG", "invalid argument to regex routine" }, { 0, "", "*** unknown regexp error code ***" } }; /* - llvm_regerror - the interface to error numbers = extern size_t llvm_regerror(int, const llvm_regex_t *, char *, size_t); */ /* ARGSUSED */ size_t llvm_regerror(int errcode, const llvm_regex_t *preg, char *errbuf, size_t errbuf_size) { struct rerr *r; size_t len; int target = errcode &~ 
REG_ITOA; const char *s; char convbuf[50]; if (errcode == REG_ATOI) s = regatoi(preg, convbuf, sizeof convbuf); else { for (r = rerrs; r->code != 0; r++) if (r->code == target) break; if (errcode&REG_ITOA) { if (r->code != 0) { assert(strlen(r->name) < sizeof(convbuf)); (void) llvm_strlcpy(convbuf, r->name, sizeof convbuf); } else // Begin HLSL Change #ifdef _WIN32 (void)_snprintf_s(convbuf, _countof(convbuf), _countof(convbuf), "REG_0x%x", target); #else (void)snprintf(convbuf, sizeof convbuf, "REG_0x%x", target); #endif // WIN32 // End HLSL Change s = convbuf; } else s = r->explain; } len = strlen(s) + 1; if (errbuf_size > 0) { llvm_strlcpy(errbuf, s, errbuf_size); } return(len); } /* - regatoi - internal routine to implement REG_ATOI */ static const char * regatoi( const llvm_regex_t *preg, char *localbuf, int localbufsize) { struct rerr *r; for (r = rerrs; r->code != 0; r++) if (strcmp(r->name, preg->re_endp) == 0) break; if (r->code == 0) return("0"); #ifdef _WIN32 (void)_snprintf_s(localbuf, localbufsize, localbufsize, "%d", r->code); #else (void)snprintf(localbuf, localbufsize, "%d", r->code); #endif // WIN32 return(localbuf); }
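/*
 * A minimal usage sketch (hypothetical caller): because llvm_regerror()
 * returns the full message length including the terminating NUL even when
 * errbuf_size is 0, the buffer can be sized with a first call and filled
 * with a second.
 */
#if 0 /* illustration only */
	size_t need = llvm_regerror(err, &preg, NULL, 0);
	char *msg = malloc(need);
	if (msg != NULL)
		(void) llvm_regerror(err, &preg, msg, need);
#endif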
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/StringRef.cpp
//===-- StringRef.cpp - Lightweight String References ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/ADT/StringRef.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/edit_distance.h" #include <bitset> using namespace llvm; // MSVC emits references to this into the translation units which reference it. #ifndef _MSC_VER const size_t StringRef::npos; #endif static char ascii_tolower(char x) { if (x >= 'A' && x <= 'Z') return x - 'A' + 'a'; return x; } static char ascii_toupper(char x) { if (x >= 'a' && x <= 'z') return x - 'a' + 'A'; return x; } static bool ascii_isdigit(char x) { return x >= '0' && x <= '9'; } // strncasecmp() is not available on non-POSIX systems, so define an // alternative function here. static int ascii_strncasecmp(const char *LHS, const char *RHS, size_t Length) { for (size_t I = 0; I < Length; ++I) { unsigned char LHC = ascii_tolower(LHS[I]); unsigned char RHC = ascii_tolower(RHS[I]); if (LHC != RHC) return LHC < RHC ? -1 : 1; } return 0; } /// compare_lower - Compare strings, ignoring case. int StringRef::compare_lower(StringRef RHS) const { if (int Res = ascii_strncasecmp(Data, RHS.Data, std::min(Length, RHS.Length))) return Res; if (Length == RHS.Length) return 0; return Length < RHS.Length ? -1 : 1; } /// Check if this string starts with the given \p Prefix, ignoring case. bool StringRef::startswith_lower(StringRef Prefix) const { return Length >= Prefix.Length && ascii_strncasecmp(Data, Prefix.Data, Prefix.Length) == 0; } /// Check if this string ends with the given \p Suffix, ignoring case. bool StringRef::endswith_lower(StringRef Suffix) const { return Length >= Suffix.Length && ascii_strncasecmp(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0; } /// compare_numeric - Compare strings, handle embedded numbers. int StringRef::compare_numeric(StringRef RHS) const { for (size_t I = 0, E = std::min(Length, RHS.Length); I != E; ++I) { // Check for sequences of digits. if (ascii_isdigit(Data[I]) && ascii_isdigit(RHS.Data[I])) { // The longer sequence of numbers is considered larger. // This doesn't really handle prefixed zeros well. size_t J; for (J = I + 1; J != E + 1; ++J) { bool ld = J < Length && ascii_isdigit(Data[J]); bool rd = J < RHS.Length && ascii_isdigit(RHS.Data[J]); if (ld != rd) return rd ? -1 : 1; if (!rd) break; } // The two number sequences have the same length (J-I), just memcmp them. if (int Res = compareMemory(Data + I, RHS.Data + I, J - I)) return Res < 0 ? -1 : 1; // Identical number sequences, continue search after the numbers. I = J - 1; continue; } if (Data[I] != RHS.Data[I]) return (unsigned char)Data[I] < (unsigned char)RHS.Data[I] ? -1 : 1; } if (Length == RHS.Length) return 0; return Length < RHS.Length ? -1 : 1; } // Compute the edit distance between the two given strings. 
unsigned StringRef::edit_distance(llvm::StringRef Other, bool AllowReplacements, unsigned MaxEditDistance) const { return llvm::ComputeEditDistance( makeArrayRef(data(), size()), makeArrayRef(Other.data(), Other.size()), AllowReplacements, MaxEditDistance); } //===----------------------------------------------------------------------===// // String Operations //===----------------------------------------------------------------------===// std::string StringRef::lower() const { std::string Result(size(), char()); for (size_type i = 0, e = size(); i != e; ++i) { Result[i] = ascii_tolower(Data[i]); } return Result; } std::string StringRef::upper() const { std::string Result(size(), char()); for (size_type i = 0, e = size(); i != e; ++i) { Result[i] = ascii_toupper(Data[i]); } return Result; } //===----------------------------------------------------------------------===// // String Searching //===----------------------------------------------------------------------===// /// find - Search for the first string \arg Str in the string. /// /// \return - The index of the first occurrence of \arg Str, or npos if not /// found. size_t StringRef::find(StringRef Str, size_t From) const { size_t N = Str.size(); if (N > Length) return npos; // For short haystacks or unsupported needles fall back to the naive algorithm if (Length < 16 || N > 255 || N == 0) { for (size_t e = Length - N + 1, i = std::min(From, e); i != e; ++i) if (substr(i, N).equals(Str)) return i; return npos; } if (From >= Length) return npos; // Build the bad char heuristic table, with uint8_t to reduce cache thrashing. uint8_t BadCharSkip[256]; std::memset(BadCharSkip, N, 256); for (unsigned i = 0; i != N-1; ++i) BadCharSkip[(uint8_t)Str[i]] = N-1-i; unsigned Len = Length-From, Pos = From; while (Len >= N) { if (substr(Pos, N).equals(Str)) // See if this is the correct substring. return Pos; // Otherwise skip the appropriate number of bytes. uint8_t Skip = BadCharSkip[(uint8_t)(*this)[Pos+N-1]]; Len -= Skip; Pos += Skip; } return npos; } /// rfind - Search for the last string \arg Str in the string. /// /// \return - The index of the last occurrence of \arg Str, or npos if not /// found. size_t StringRef::rfind(StringRef Str) const { size_t N = Str.size(); if (N > Length) return npos; for (size_t i = Length - N + 1, e = 0; i != e;) { --i; if (substr(i, N).equals(Str)) return i; } return npos; } /// find_first_of - Find the first character in the string that is in \arg /// Chars, or npos if not found. /// /// Note: O(size() + Chars.size()) StringRef::size_type StringRef::find_first_of(StringRef Chars, size_t From) const { std::bitset<1 << CHAR_BIT> CharBits; for (size_type i = 0; i != Chars.size(); ++i) CharBits.set((unsigned char)Chars[i]); for (size_type i = std::min(From, Length), e = Length; i != e; ++i) if (CharBits.test((unsigned char)Data[i])) return i; return npos; } /// find_first_not_of - Find the first character in the string that is not /// \arg C or npos if not found. StringRef::size_type StringRef::find_first_not_of(char C, size_t From) const { for (size_type i = std::min(From, Length), e = Length; i != e; ++i) if (Data[i] != C) return i; return npos; } /// find_first_not_of - Find the first character in the string that is not /// in the string \arg Chars, or npos if not found. 
/// /// Note: O(size() + Chars.size()) StringRef::size_type StringRef::find_first_not_of(StringRef Chars, size_t From) const { std::bitset<1 << CHAR_BIT> CharBits; for (size_type i = 0; i != Chars.size(); ++i) CharBits.set((unsigned char)Chars[i]); for (size_type i = std::min(From, Length), e = Length; i != e; ++i) if (!CharBits.test((unsigned char)Data[i])) return i; return npos; } /// find_last_of - Find the last character in the string that is in \arg C, /// or npos if not found. /// /// Note: O(size() + Chars.size()) StringRef::size_type StringRef::find_last_of(StringRef Chars, size_t From) const { std::bitset<1 << CHAR_BIT> CharBits; for (size_type i = 0; i != Chars.size(); ++i) CharBits.set((unsigned char)Chars[i]); for (size_type i = std::min(From, Length) - 1, e = -1; i != e; --i) if (CharBits.test((unsigned char)Data[i])) return i; return npos; } /// find_last_not_of - Find the last character in the string that is not /// \arg C, or npos if not found. StringRef::size_type StringRef::find_last_not_of(char C, size_t From) const { for (size_type i = std::min(From, Length) - 1, e = -1; i != e; --i) if (Data[i] != C) return i; return npos; } /// find_last_not_of - Find the last character in the string that is not in /// \arg Chars, or npos if not found. /// /// Note: O(size() + Chars.size()) StringRef::size_type StringRef::find_last_not_of(StringRef Chars, size_t From) const { std::bitset<1 << CHAR_BIT> CharBits; for (size_type i = 0, e = Chars.size(); i != e; ++i) CharBits.set((unsigned char)Chars[i]); for (size_type i = std::min(From, Length) - 1, e = -1; i != e; --i) if (!CharBits.test((unsigned char)Data[i])) return i; return npos; } void StringRef::split(SmallVectorImpl<StringRef> &A, StringRef Separators, int MaxSplit, bool KeepEmpty) const { StringRef rest = *this; // rest.data() is used to distinguish cases like "a," that splits into // "a" + "" and "a" that splits into "a" + 0. for (int splits = 0; rest.data() != nullptr && (MaxSplit < 0 || splits < MaxSplit); ++splits) { std::pair<StringRef, StringRef> p = rest.split(Separators); if (KeepEmpty || p.first.size() != 0) A.push_back(p.first); rest = p.second; } // If we have a tail left, add it. if (rest.data() != nullptr && (rest.size() != 0 || KeepEmpty)) A.push_back(rest); } //===----------------------------------------------------------------------===// // Helpful Algorithms //===----------------------------------------------------------------------===// /// count - Return the number of non-overlapped occurrences of \arg Str in /// the string. size_t StringRef::count(StringRef Str) const { size_t Count = 0; size_t N = Str.size(); if (N > Length) return 0; for (size_t i = 0, e = Length - N + 1; i != e; ++i) if (substr(i, N).equals(Str)) ++Count; return Count; } static unsigned GetAutoSenseRadix(StringRef &Str) { if (Str.startswith("0x")) { Str = Str.substr(2); return 16; } if (Str.startswith("0b")) { Str = Str.substr(2); return 2; } if (Str.startswith("0o")) { Str = Str.substr(2); return 8; } if (Str.startswith("0")) return 8; return 10; } /// GetAsUnsignedInteger - Workhorse method that converts a integer character /// sequence of radix up to 36 to an unsigned long long value. bool llvm::getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result) { // Autosense radix if not specified. if (Radix == 0) Radix = GetAutoSenseRadix(Str); // Empty strings (after the radix autosense) are invalid. if (Str.empty()) return true; // Parse all the bytes of the string given this radix. Watch for overflow. 
Result = 0; while (!Str.empty()) { unsigned CharVal; if (Str[0] >= '0' && Str[0] <= '9') CharVal = Str[0]-'0'; else if (Str[0] >= 'a' && Str[0] <= 'z') CharVal = Str[0]-'a'+10; else if (Str[0] >= 'A' && Str[0] <= 'Z') CharVal = Str[0]-'A'+10; else return true; // If the parsed value is larger than the integer radix, the string is // invalid. if (CharVal >= Radix) return true; // Add in this character. unsigned long long PrevResult = Result; Result = Result*Radix+CharVal; // Check for overflow by shifting back and seeing if bits were lost. if (Result/Radix < PrevResult) return true; Str = Str.substr(1); } return false; } bool llvm::getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result) { unsigned long long ULLVal; // Handle positive strings first. if (Str.empty() || Str.front() != '-') { if (getAsUnsignedInteger(Str, Radix, ULLVal) || // Check for value so large it overflows a signed value. (long long)ULLVal < 0) return true; Result = ULLVal; return false; } // Get the positive part of the value. if (getAsUnsignedInteger(Str.substr(1), Radix, ULLVal) || // Reject values so large they'd overflow as negative signed, but allow // "-0". This negates the unsigned so that the negative isn't undefined // on signed overflow. (long long)-ULLVal > 0) return true; Result = -ULLVal; return false; } bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const { StringRef Str = *this; // Autosense radix if not specified. if (Radix == 0) Radix = GetAutoSenseRadix(Str); assert(Radix > 1 && Radix <= 36); // Empty strings (after the radix autosense) are invalid. if (Str.empty()) return true; // Skip leading zeroes. This can be a significant improvement if // it means we don't need > 64 bits. while (!Str.empty() && Str.front() == '0') Str = Str.substr(1); // If it was nothing but zeroes.... if (Str.empty()) { Result = APInt(64, 0); return false; } // (Over-)estimate the required number of bits. unsigned Log2Radix = 0; while ((1U << Log2Radix) < Radix) Log2Radix++; bool IsPowerOf2Radix = ((1U << Log2Radix) == Radix); unsigned BitWidth = Log2Radix * Str.size(); if (BitWidth < Result.getBitWidth()) BitWidth = Result.getBitWidth(); // don't shrink the result else if (BitWidth > Result.getBitWidth()) Result = Result.zext(BitWidth); APInt RadixAP, CharAP; // unused unless !IsPowerOf2Radix if (!IsPowerOf2Radix) { // These must have the same bit-width as Result. RadixAP = APInt(BitWidth, Radix); CharAP = APInt(BitWidth, 0); } // Parse all the bytes of the string given this radix. Result = 0; while (!Str.empty()) { unsigned CharVal; if (Str[0] >= '0' && Str[0] <= '9') CharVal = Str[0]-'0'; else if (Str[0] >= 'a' && Str[0] <= 'z') CharVal = Str[0]-'a'+10; else if (Str[0] >= 'A' && Str[0] <= 'Z') CharVal = Str[0]-'A'+10; else return true; // If the parsed value is larger than the integer radix, the string is // invalid. if (CharVal >= Radix) return true; // Add in this character. if (IsPowerOf2Radix) { Result <<= Log2Radix; Result |= CharVal; } else { Result *= RadixAP; CharAP = CharVal; Result += CharAP; } Str = Str.substr(1); } return false; } // Implementation of StringRef hashing. hash_code llvm::hash_value(StringRef S) { return hash_combine_range(S.begin(), S.end()); }
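// A minimal sketch of the radix autosensing implemented by
// GetAutoSenseRadix() above (caller and literals are hypothetical): with
// Radix == 0, a "0x" prefix selects base 16, "0b" base 2, "0o" or a bare
// leading '0' base 8, and anything else base 10. The functions return true
// on failure.
static void autoSenseRadixSketch() {
  unsigned long long V;
  (void)getAsUnsignedInteger("0x1f", 0, V);  // base 16: V == 31
  (void)getAsUnsignedInteger("0b101", 0, V); // base 2:  V == 5
  (void)getAsUnsignedInteger("017", 0, V);   // base 8:  V == 15
  bool Failed = getAsUnsignedInteger("9z", 0, V); // Failed == true
  (void)Failed;
}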
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regcname.h
/*- * This code is derived from OpenBSD's libc/regex, original license follows: * * Copyright (c) 1992, 1993, 1994 Henry Spencer. * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Henry Spencer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)cname.h 8.3 (Berkeley) 3/20/94 */ #ifndef LLVM_SUPPORT_REGCNAME_H #define LLVM_SUPPORT_REGCNAME_H #include <stddef.h> /* character-name table */ static struct cname { const char *name; char code; } cnames[] = { { "NUL", '\0' }, { "SOH", '\001' }, { "STX", '\002' }, { "ETX", '\003' }, { "EOT", '\004' }, { "ENQ", '\005' }, { "ACK", '\006' }, { "BEL", '\007' }, { "alert", '\007' }, { "BS", '\010' }, { "backspace", '\b' }, { "HT", '\011' }, { "tab", '\t' }, { "LF", '\012' }, { "newline", '\n' }, { "VT", '\013' }, { "vertical-tab", '\v' }, { "FF", '\014' }, { "form-feed", '\f' }, { "CR", '\015' }, { "carriage-return", '\r' }, { "SO", '\016' }, { "SI", '\017' }, { "DLE", '\020' }, { "DC1", '\021' }, { "DC2", '\022' }, { "DC3", '\023' }, { "DC4", '\024' }, { "NAK", '\025' }, { "SYN", '\026' }, { "ETB", '\027' }, { "CAN", '\030' }, { "EM", '\031' }, { "SUB", '\032' }, { "ESC", '\033' }, { "IS4", '\034' }, { "FS", '\034' }, { "IS3", '\035' }, { "GS", '\035' }, { "IS2", '\036' }, { "RS", '\036' }, { "IS1", '\037' }, { "US", '\037' }, { "space", ' ' }, { "exclamation-mark", '!' }, { "quotation-mark", '"' }, { "number-sign", '#' }, { "dollar-sign", '$' }, { "percent-sign", '%' }, { "ampersand", '&' }, { "apostrophe", '\'' }, { "left-parenthesis", '(' }, { "right-parenthesis", ')' }, { "asterisk", '*' }, { "plus-sign", '+' }, { "comma", ',' }, { "hyphen", '-' }, { "hyphen-minus", '-' }, { "period", '.' }, { "full-stop", '.' 
}, { "slash", '/' }, { "solidus", '/' }, { "zero", '0' }, { "one", '1' }, { "two", '2' }, { "three", '3' }, { "four", '4' }, { "five", '5' }, { "six", '6' }, { "seven", '7' }, { "eight", '8' }, { "nine", '9' }, { "colon", ':' }, { "semicolon", ';' }, { "less-than-sign", '<' }, { "equals-sign", '=' }, { "greater-than-sign", '>' }, { "question-mark", '?' }, { "commercial-at", '@' }, { "left-square-bracket", '[' }, { "backslash", '\\' }, { "reverse-solidus", '\\' }, { "right-square-bracket", ']' }, { "circumflex", '^' }, { "circumflex-accent", '^' }, { "underscore", '_' }, { "low-line", '_' }, { "grave-accent", '`' }, { "left-brace", '{' }, { "left-curly-bracket", '{' }, { "vertical-line", '|' }, { "right-brace", '}' }, { "right-curly-bracket", '}' }, { "tilde", '~' }, { "DEL", '\177' }, { NULL, 0 } }; #endif
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/MemoryBuffer.cpp
//===--- MemoryBuffer.cpp - Memory Buffer implementation ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemoryBuffer interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/MemoryBuffer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include <cassert>
#include <cerrno>
#include <cstring>
#include <new>
#include <sys/types.h>
#include <system_error>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
#include <unistd.h>
#else
#include <io.h>
#endif
using namespace llvm;

//===----------------------------------------------------------------------===//
// MemoryBuffer implementation itself.
//===----------------------------------------------------------------------===//

MemoryBuffer::~MemoryBuffer() { }

/// init - Initialize this MemoryBuffer as a reference to externally allocated
/// memory, memory that we know is already null terminated.
void MemoryBuffer::init(const char *BufStart, const char *BufEnd,
                        bool RequiresNullTerminator) {
  assert((!RequiresNullTerminator || BufEnd[0] == 0) &&
         "Buffer is not null terminated!");
  BufferStart = BufStart;
  BufferEnd = BufEnd;
}

//===----------------------------------------------------------------------===//
// MemoryBufferMem implementation.
//===----------------------------------------------------------------------===//

/// CopyStringRef - Copies contents of a StringRef into a block of memory and
/// null-terminates it.
static void CopyStringRef(char *Memory, StringRef Data) {
  if (!Data.empty())
    memcpy(Memory, Data.data(), Data.size());
  Memory[Data.size()] = 0; // Null terminate string.
}

namespace {
struct NamedBufferAlloc {
  const Twine &Name;
  NamedBufferAlloc(const Twine &Name) : Name(Name) {}
};
}

void *operator new(size_t N, const NamedBufferAlloc &Alloc) {
  SmallString<256> NameBuf;
  StringRef NameRef = Alloc.Name.toStringRef(NameBuf);
  char *Mem = static_cast<char *>(operator new(N + NameRef.size() + 1));
  CopyStringRef(Mem + N, NameRef);
  return Mem;
}

namespace {
/// MemoryBufferMem - Named MemoryBuffer pointing to a block of memory.
class MemoryBufferMem : public MemoryBuffer {
public:
  MemoryBufferMem(StringRef InputData, bool RequiresNullTerminator) {
    init(InputData.begin(), InputData.end(), RequiresNullTerminator);
  }

  // Disable sized deallocation for MemoryBufferMem, because it has
  // tail-allocated data.
  // (See llvm commit 21c303e9eadfbd2d685665176159f5f4738169b1)
  void operator delete(void *p) { ::operator delete(p); }

  const char *getBufferIdentifier() const override {
    // The name is stored after the class itself.
return reinterpret_cast<const char*>(this + 1); } BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; } }; } static ErrorOr<std::unique_ptr<MemoryBuffer>> getFileAux(const Twine &Filename, int64_t FileSize, uint64_t MapSize, uint64_t Offset, bool RequiresNullTerminator, bool IsVolatileSize); std::unique_ptr<MemoryBuffer> MemoryBuffer::getMemBuffer(StringRef InputData, StringRef BufferName, bool RequiresNullTerminator) { auto *Ret = new (NamedBufferAlloc(BufferName)) MemoryBufferMem(InputData, RequiresNullTerminator); return std::unique_ptr<MemoryBuffer>(Ret); } std::unique_ptr<MemoryBuffer> MemoryBuffer::getMemBuffer(MemoryBufferRef Ref, bool RequiresNullTerminator) { return std::unique_ptr<MemoryBuffer>(getMemBuffer( Ref.getBuffer(), Ref.getBufferIdentifier(), RequiresNullTerminator)); } std::unique_ptr<MemoryBuffer> MemoryBuffer::getMemBufferCopy(StringRef InputData, const Twine &BufferName) { std::unique_ptr<MemoryBuffer> Buf = getNewUninitMemBuffer(InputData.size(), BufferName); if (!Buf) return nullptr; memcpy(const_cast<char*>(Buf->getBufferStart()), InputData.data(), InputData.size()); return Buf; } std::unique_ptr<MemoryBuffer> MemoryBuffer::getNewUninitMemBuffer(size_t Size, const Twine &BufferName) { // Allocate space for the MemoryBuffer, the data and the name. It is important // that MemoryBuffer and data are aligned so PointerIntPair works with them. // TODO: Is 16-byte alignment enough? We copy small object files with large // alignment expectations into this buffer. SmallString<256> NameBuf; StringRef NameRef = BufferName.toStringRef(NameBuf); size_t AlignedStringLen = RoundUpToAlignment(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16); size_t RealLen = AlignedStringLen + Size + 1; char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow)); if (!Mem) return nullptr; // The name is stored after the class itself. CopyStringRef(Mem + sizeof(MemoryBufferMem), NameRef); // The buffer begins after the name and must be aligned. char *Buf = Mem + AlignedStringLen; Buf[Size] = 0; // Null terminate buffer. auto *Ret = new (Mem) MemoryBufferMem(StringRef(Buf, Size), true); return std::unique_ptr<MemoryBuffer>(Ret); } std::unique_ptr<MemoryBuffer> MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) { std::unique_ptr<MemoryBuffer> SB = getNewUninitMemBuffer(Size, BufferName); if (!SB) return nullptr; memset(const_cast<char*>(SB->getBufferStart()), 0, Size); return SB; } ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getFileOrSTDIN(const Twine &Filename, int64_t FileSize) { SmallString<256> NameBuf; StringRef NameRef = Filename.toStringRef(NameBuf); if (NameRef == "-") return getSTDIN(); return getFile(Filename, FileSize); } ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getFileSlice(const Twine &FilePath, uint64_t MapSize, uint64_t Offset) { return getFileAux(FilePath, -1, MapSize, Offset, false, false); } //===----------------------------------------------------------------------===// // MemoryBuffer::getFile implementation. //===----------------------------------------------------------------------===// namespace { /// \brief Memory maps a file descriptor using sys::fs::mapped_file_region. /// /// This handles converting the offset into a legal offset on the platform. 
class MemoryBufferMMapFile : public MemoryBuffer {
  sys::fs::mapped_file_region MFR;

  static uint64_t getLegalMapOffset(uint64_t Offset) {
    return Offset & ~(sys::fs::mapped_file_region::alignment() - 1);
  }

  static uint64_t getLegalMapSize(uint64_t Len, uint64_t Offset) {
    return Len + (Offset - getLegalMapOffset(Offset));
  }

  const char *getStart(uint64_t Len, uint64_t Offset) {
    return MFR.const_data() + (Offset - getLegalMapOffset(Offset));
  }

public:
  MemoryBufferMMapFile(bool RequiresNullTerminator, int FD, uint64_t Len,
                       uint64_t Offset, std::error_code &EC)
      : MFR(FD, sys::fs::mapped_file_region::readonly,
            getLegalMapSize(Len, Offset), getLegalMapOffset(Offset), EC) {
    if (!EC) {
      const char *Start = getStart(Len, Offset);
      init(Start, Start + Len, RequiresNullTerminator);
    }
  }

  // Disable sized deallocation for MemoryBufferMMapFile, because it has
  // tail-allocated data.
  // (See llvm commit 21c303e9eadfbd2d685665176159f5f4738169b1)
  void operator delete(void *p) { ::operator delete(p); }

  const char *getBufferIdentifier() const override {
    // The name is stored after the class itself.
    return reinterpret_cast<const char *>(this + 1);
  }

  BufferKind getBufferKind() const override { return MemoryBuffer_MMap; }
};
}

static ErrorOr<std::unique_ptr<MemoryBuffer>>
getMemoryBufferForStream(int FD, const Twine &BufferName) {
  const ssize_t ChunkSize = 4096*4;
  const ssize_t InitialChunkSize = 4096*2; // HLSL Change - be more conservative on how much stack space we start with
  SmallString<InitialChunkSize> Buffer;
  ssize_t ReadBytes;
  // Read into Buffer until we hit EOF.
  do {
    Buffer.reserve(Buffer.size() + ChunkSize);
    ReadBytes = llvm::sys::fs::msf_read(FD, Buffer.end(), ChunkSize);
    if (ReadBytes == -1) {
      if (errno == EINTR) continue;
      return std::error_code(errno, std::generic_category());
    }
    Buffer.set_size(Buffer.size() + ReadBytes);
  } while (ReadBytes != 0);

  return MemoryBuffer::getMemBufferCopy(Buffer, BufferName);
}

ErrorOr<std::unique_ptr<MemoryBuffer>>
MemoryBuffer::getFile(const Twine &Filename, int64_t FileSize,
                      bool RequiresNullTerminator, bool IsVolatileSize) {
  return getFileAux(Filename, FileSize, FileSize, 0, RequiresNullTerminator,
                    IsVolatileSize);
}

static ErrorOr<std::unique_ptr<MemoryBuffer>>
getOpenFileImpl(int FD, const Twine &Filename, uint64_t FileSize,
                uint64_t MapSize, int64_t Offset, bool RequiresNullTerminator,
                bool IsVolatileSize);

static ErrorOr<std::unique_ptr<MemoryBuffer>>
getFileAux(const Twine &Filename, int64_t FileSize, uint64_t MapSize,
           uint64_t Offset, bool RequiresNullTerminator, bool IsVolatileSize) {
  int FD;
  std::error_code EC = sys::fs::openFileForRead(Filename, FD);
  if (EC)
    return EC;

  ErrorOr<std::unique_ptr<MemoryBuffer>> Ret =
      getOpenFileImpl(FD, Filename, FileSize, MapSize, Offset,
                      RequiresNullTerminator, IsVolatileSize);
  llvm::sys::fs::msf_close(FD); // HLSL Change - use msf_close
  return Ret;
}

static bool shouldUseMmap(int FD, size_t FileSize, size_t MapSize,
                          off_t Offset, bool RequiresNullTerminator,
                          int PageSize, bool IsVolatileSize) {
  // mmap may leave the buffer without null terminator if the file size changed
  // by the time the last page is mapped in, so avoid it if the file size is
  // likely to change.
  if (IsVolatileSize)
    return false;

  // We don't use mmap for small files because this can severely fragment our
  // address space.
  if (MapSize < 4 * 4096 || MapSize < (unsigned)PageSize)
    return false;

  if (!RequiresNullTerminator)
    return true;

  // If we don't know the file size, use fstat to find out. fstat on an open
  // file descriptor is cheaper than stat on a random path.
// FIXME: this chunk of code is duplicated, but it avoids a fstat when // RequiresNullTerminator = false and MapSize != -1. if (FileSize == size_t(-1)) { sys::fs::file_status Status; if (sys::fs::status(FD, Status)) return false; FileSize = Status.getSize(); } // If we need a null terminator and the end of the map is inside the file, // we cannot use mmap. size_t End = Offset + MapSize; assert(End <= FileSize); if (End != FileSize) return false; // Don't try to map files that are exactly a multiple of the system page size // if we need a null terminator. if ((FileSize & (PageSize -1)) == 0) return false; #if defined(__CYGWIN__) // Don't try to map files that are exactly a multiple of the physical page size // if we need a null terminator. // FIXME: We should reorganize again getPageSize() on Win32. if ((FileSize & (4096 - 1)) == 0) return false; #endif return true; } static ErrorOr<std::unique_ptr<MemoryBuffer>> getOpenFileImpl(int FD, const Twine &Filename, uint64_t FileSize, uint64_t MapSize, int64_t Offset, bool RequiresNullTerminator, bool IsVolatileSize) { static int PageSize = sys::Process::getPageSize(); // Default is to map the full file. if (MapSize == uint64_t(-1)) { // If we don't know the file size, use fstat to find out. fstat on an open // file descriptor is cheaper than stat on a random path. if (FileSize == uint64_t(-1)) { sys::fs::file_status Status; std::error_code EC = sys::fs::status(FD, Status); if (EC) return EC; // If this is not a file or a block device (e.g. it's a named pipe // or character device), we can't trust the size. Create the memory // buffer by copying off the stream. sys::fs::file_type Type = Status.type(); if (Type != sys::fs::file_type::regular_file && Type != sys::fs::file_type::block_file) return getMemoryBufferForStream(FD, Filename); FileSize = Status.getSize(); } MapSize = FileSize; } if (shouldUseMmap(FD, FileSize, MapSize, Offset, RequiresNullTerminator, PageSize, IsVolatileSize)) { std::error_code EC; std::unique_ptr<MemoryBuffer> Result( new (NamedBufferAlloc(Filename)) MemoryBufferMMapFile(RequiresNullTerminator, FD, MapSize, Offset, EC)); if (!EC) { return ErrorOr<std::unique_ptr<MemoryBuffer>>( std::move(Result)); // HLSL Change - Fix redundant move warning. } } std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getNewUninitMemBuffer(MapSize, Filename); if (!Buf) { // Failed to create a buffer. The only way it can fail is if // new(std::nothrow) returns 0. return make_error_code(errc::not_enough_memory); } char *BufPtr = const_cast<char *>(Buf->getBufferStart()); size_t BytesLeft = MapSize; #undef HAVE_PREAD // HLSL Change - pread bypasses needed layers #ifndef HAVE_PREAD if (llvm::sys::fs::msf_lseek(FD, Offset, SEEK_SET) == -1) // HLSL Change - use msf_lseek return std::error_code(errno, std::generic_category()); #endif while (BytesLeft) { #ifdef HAVE_PREAD ssize_t NumRead = ::pread(FD, BufPtr, BytesLeft, MapSize-BytesLeft+Offset); #else ssize_t NumRead = ::llvm::sys::fs::msf_read(FD, BufPtr, BytesLeft); #endif if (NumRead == -1) { if (errno == EINTR) continue; // Error while reading. return std::error_code(errno, std::generic_category()); } if (NumRead == 0) { memset(BufPtr, 0, BytesLeft); // zero-initialize rest of the buffer. break; } BytesLeft -= NumRead; BufPtr += NumRead; } return ErrorOr<std::unique_ptr<MemoryBuffer>>(std::move(Buf)); // HLSL Change - Fix redundant move warning. 
} ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getOpenFile(int FD, const Twine &Filename, uint64_t FileSize, bool RequiresNullTerminator, bool IsVolatileSize) { return getOpenFileImpl(FD, Filename, FileSize, FileSize, 0, RequiresNullTerminator, IsVolatileSize); } ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getOpenFileSlice(int FD, const Twine &Filename, uint64_t MapSize, int64_t Offset) { assert(MapSize != uint64_t(-1)); return getOpenFileImpl(FD, Filename, -1, MapSize, Offset, false, /*IsVolatileSize*/ false); } ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getSTDIN() { // Read in all of the data from stdin, we cannot mmap stdin. // // FIXME: That isn't necessarily true, we should try to mmap stdin and // fallback if it fails. sys::ChangeStdinToBinary(); return getMemoryBufferForStream(0, "<stdin>"); } MemoryBufferRef MemoryBuffer::getMemBufferRef() const { StringRef Data = getBuffer(); StringRef Identifier = getBufferIdentifier(); return MemoryBufferRef(Data, Identifier); }
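// --- Illustrative usage sketch (not part of the original file) ---
// A minimal standalone client of the API implemented above; the file name
// "input.txt" and the demo function are hypothetical. Failures surface as a
// std::error_code through ErrorOr rather than as exceptions.
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

int demoMemoryBuffer() {
  using namespace llvm;
  ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
      MemoryBuffer::getFileOrSTDIN("input.txt"); // hypothetical path
  if (std::error_code EC = BufOrErr.getError()) {
    errs() << "error: " << EC.message() << "\n";
    return 1;
  }
  MemoryBuffer &Buf = **BufOrErr;
  // RequiresNullTerminator defaulted to true, so the contents end in a
  // guaranteed '\0' and are safe to hand to C string APIs.
  outs() << Buf.getBufferIdentifier() << ": " << Buf.getBufferSize()
         << " bytes\n";
  // getMemBufferCopy owns its data; the source string may die afterwards.
  std::unique_ptr<MemoryBuffer> Copy =
      MemoryBuffer::getMemBufferCopy("hello world", "<inline>");
  outs() << Copy->getBuffer() << "\n";
  return 0;
}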
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/GraphWriter.cpp
//===-- GraphWriter.cpp - Implements GraphWriter support routines ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements misc. GraphWriter support routines. // //===----------------------------------------------------------------------===// #include "llvm/Support/GraphWriter.h" #include "llvm/Config/config.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Program.h" using namespace llvm; #if 0 // HLSL Change Starts - option pending static cl::opt<bool> ViewBackground("view-background", cl::Hidden, cl::desc("Execute graph viewer in the background. Creates tmp file litter.")); #elif defined (__APPLE__) static const bool ViewBackground = false; #endif // HLSL Change Ends std::string llvm::DOT::EscapeString(const std::string &Label) { std::string Str(Label); for (unsigned i = 0; i != Str.length(); ++i) switch (Str[i]) { case '\n': Str.insert(Str.begin()+i, '\\'); // Escape character... ++i; Str[i] = 'n'; break; case '\t': Str.insert(Str.begin()+i, ' '); // Convert to two spaces ++i; Str[i] = ' '; break; case '\\': if (i+1 != Str.length()) switch (Str[i+1]) { case 'l': continue; // don't disturb \l case '|': case '{': case '}': Str.erase(Str.begin()+i); continue; default: break; } LLVM_FALLTHROUGH; // HLSL Change case '{': case '}': case '<': case '>': case '|': case '"': Str.insert(Str.begin()+i, '\\'); // Escape character... ++i; // don't infinite loop break; } return Str; } /// \brief Get a color string for this node number. Simply round-robin selects /// from a reasonable number of colors. StringRef llvm::DOT::getColorString(unsigned ColorNumber) { static const int NumColors = 20; static const char* Colors[NumColors] = { "aaaaaa", "aa0000", "00aa00", "aa5500", "0055ff", "aa00aa", "00aaaa", "555555", "ff5555", "55ff55", "ffff55", "5555ff", "ff55ff", "55ffff", "ffaaaa", "aaffaa", "ffffaa", "aaaaff", "ffaaff", "aaffff"}; return Colors[ColorNumber % NumColors]; } std::string llvm::createGraphFilename(const Twine &Name, int &FD) { FD = -1; SmallString<128> Filename; std::error_code EC = sys::fs::createTemporaryFile(Name, "dot", FD, Filename); if (EC) { errs() << "Error: " << EC.message() << "\n"; return ""; } errs() << "Writing '" << Filename << "'... "; return Filename.str(); } // Execute the graph viewer. Return true if there were errors. static bool ExecGraphViewer(StringRef ExecPath, std::vector<const char *> &args, StringRef Filename, bool wait, std::string &ErrMsg) { assert(args.back() == nullptr); #if 0 // HLSL Change Starts if (wait) { if (sys::ExecuteAndWait(ExecPath, args.data(), nullptr, nullptr, 0, 0, &ErrMsg)) { errs() << "Error: " << ErrMsg << "\n"; return true; } sys::fs::remove(Filename); errs() << " done. 
\n"; } else { sys::ExecuteNoWait(ExecPath, args.data(), nullptr, nullptr, 0, &ErrMsg); errs() << "Remember to erase graph file: " << Filename << "\n"; } #else errs() << "Support for graph creation disabled.\n"; #endif // HLSL Change Ends return false; } namespace { struct GraphSession { std::string LogBuffer; bool TryFindProgram(StringRef Names, std::string &ProgramPath) { raw_string_ostream Log(LogBuffer); SmallVector<StringRef, 8> parts; Names.split(parts, "|"); for (auto Name : parts) { if (ErrorOr<std::string> P = sys::findProgramByName(Name)) { ProgramPath = *P; return true; } Log << " Tried '" << Name << "'\n"; } return false; } }; } // namespace static const char *getProgramName(GraphProgram::Name program) { switch (program) { case GraphProgram::DOT: return "dot"; case GraphProgram::FDP: return "fdp"; case GraphProgram::NEATO: return "neato"; case GraphProgram::TWOPI: return "twopi"; case GraphProgram::CIRCO: return "circo"; } llvm_unreachable("bad kind"); } bool llvm::DisplayGraph(StringRef FilenameRef, bool wait, GraphProgram::Name program) { std::string Filename = FilenameRef; std::string ErrMsg; std::string ViewerPath; GraphSession S; #ifdef __APPLE__ wait &= !ViewBackground; if (S.TryFindProgram("open", ViewerPath)) { std::vector<const char *> args; args.push_back(ViewerPath.c_str()); if (wait) args.push_back("-W"); args.push_back(Filename.c_str()); args.push_back(nullptr); errs() << "Trying 'open' program... "; if (!ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg)) return false; } #endif if (S.TryFindProgram("xdg-open", ViewerPath)) { std::vector<const char *> args; args.push_back(ViewerPath.c_str()); args.push_back(Filename.c_str()); args.push_back(nullptr); errs() << "Trying 'xdg-open' program... "; if (!ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg)) return false; } // Graphviz if (S.TryFindProgram("Graphviz", ViewerPath)) { std::vector<const char *> args; args.push_back(ViewerPath.c_str()); args.push_back(Filename.c_str()); args.push_back(nullptr); errs() << "Running 'Graphviz' program... "; return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg); } // xdot if (S.TryFindProgram("xdot|xdot.py", ViewerPath)) { std::vector<const char *> args; args.push_back(ViewerPath.c_str()); args.push_back(Filename.c_str()); args.push_back("-f"); args.push_back(getProgramName(program)); args.push_back(nullptr); errs() << "Running 'xdot.py' program... "; return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg); } enum PSViewerKind { PSV_None, PSV_OSXOpen, PSV_XDGOpen, PSV_Ghostview }; PSViewerKind PSViewer = PSV_None; #ifdef __APPLE__ if (!PSViewer && S.TryFindProgram("open", ViewerPath)) PSViewer = PSV_OSXOpen; #endif if (!PSViewer && S.TryFindProgram("gv", ViewerPath)) PSViewer = PSV_Ghostview; if (!PSViewer && S.TryFindProgram("xdg-open", ViewerPath)) PSViewer = PSV_XDGOpen; // PostScript graph generator + PostScript viewer std::string GeneratorPath; if (PSViewer && (S.TryFindProgram(getProgramName(program), GeneratorPath) || S.TryFindProgram("dot|fdp|neato|twopi|circo", GeneratorPath))) { std::string PSFilename = Filename + ".ps"; std::vector<const char *> args; args.push_back(GeneratorPath.c_str()); args.push_back("-Tps"); args.push_back("-Nfontname=Courier"); args.push_back("-Gsize=7.5,10"); args.push_back(Filename.c_str()); args.push_back("-o"); args.push_back(PSFilename.c_str()); args.push_back(nullptr); errs() << "Running '" << GeneratorPath << "' program... 
"; if (ExecGraphViewer(GeneratorPath, args, Filename, wait, ErrMsg)) return true; args.clear(); args.push_back(ViewerPath.c_str()); switch (PSViewer) { case PSV_OSXOpen: args.push_back("-W"); args.push_back(PSFilename.c_str()); break; case PSV_XDGOpen: wait = false; args.push_back(PSFilename.c_str()); break; case PSV_Ghostview: args.push_back("--spartan"); args.push_back(PSFilename.c_str()); break; case PSV_None: llvm_unreachable("Invalid viewer"); } args.push_back(nullptr); ErrMsg.clear(); return ExecGraphViewer(ViewerPath, args, PSFilename, wait, ErrMsg); } // dotty if (S.TryFindProgram("dotty", ViewerPath)) { std::vector<const char *> args; args.push_back(ViewerPath.c_str()); args.push_back(Filename.c_str()); args.push_back(nullptr); // Dotty spawns another app and doesn't wait until it returns #ifdef LLVM_ON_WIN32 wait = false; #endif errs() << "Running 'dotty' program... "; return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg); } errs() << "Error: Couldn't find a usable graph viewer program:\n"; errs() << S.LogBuffer << "\n"; return true; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Watchdog.cpp
//===---- Watchdog.cpp - Implement Watchdog ---------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Watchdog class. // //===----------------------------------------------------------------------===// #include "llvm/Support/Watchdog.h" #include "llvm/Config/config.h" // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Watchdog.inc" #endif #ifdef LLVM_ON_WIN32 #include "Windows/Watchdog.inc" #endif
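// --- Illustrative usage sketch (not part of the original file) ---
// Watchdog is a scoped timeout around the platform hooks included above: the
// Unix variant arms alarm(seconds) in its constructor and disarms it in its
// destructor, so a scope that overruns receives SIGALRM. doExpensiveWork()
// is a hypothetical placeholder.
#include "llvm/Support/Watchdog.h"

void doExpensiveWork(); // hypothetical

void demoWatchdog() {
  llvm::sys::Watchdog W(/*seconds=*/30);
  doExpensiveWork(); // must finish before the timeout fires
} // ~Watchdog() disarms the timer here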
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/FoldingSet.cpp
//===-- Support/FoldingSet.cpp - Uniquing Hash Set --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a hash set that can be used to remove duplication of // nodes in a graph. // //===----------------------------------------------------------------------===// #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/Hashing.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Host.h" #include "llvm/Support/MathExtras.h" #include <cassert> #include <cstring> using namespace llvm; //===----------------------------------------------------------------------===// // FoldingSetNodeIDRef Implementation /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef, /// used to lookup the node in the FoldingSetImpl. unsigned FoldingSetNodeIDRef::ComputeHash() const { return static_cast<unsigned>(hash_combine_range(Data, Data+Size)); } bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { if (Size != RHS.Size) return false; return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; } /// Used to compare the "ordering" of two nodes as defined by the /// profiled bits and their ordering defined by memcmp(). bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { if (Size != RHS.Size) return Size < RHS.Size; return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; } //===----------------------------------------------------------------------===// // FoldingSetNodeID Implementation /// Add* - Add various data types to Bit data. /// void FoldingSetNodeID::AddPointer(const void *Ptr) { // Note: this adds pointers to the hash using sizes and endianness that // depend on the host. It doesn't matter, however, because hashing on // pointer values is inherently unstable. Nothing should depend on the // ordering of nodes in the folding set. Bits.append(reinterpret_cast<unsigned *>(&Ptr), reinterpret_cast<unsigned *>(&Ptr+1)); } void FoldingSetNodeID::AddInteger(signed I) { Bits.push_back(I); } void FoldingSetNodeID::AddInteger(unsigned I) { Bits.push_back(I); } void FoldingSetNodeID::AddInteger(long I) { AddInteger((unsigned long)I); } void FoldingSetNodeID::AddInteger(unsigned long I) { if (sizeof(long) == sizeof(int)) AddInteger(unsigned(I)); else if (sizeof(long) == sizeof(long long)) { AddInteger((unsigned long long)I); } else { llvm_unreachable("unexpected sizeof(long)"); } } void FoldingSetNodeID::AddInteger(long long I) { AddInteger((unsigned long long)I); } void FoldingSetNodeID::AddInteger(unsigned long long I) { AddInteger(unsigned(I)); if ((uint64_t)(unsigned)I != I) Bits.push_back(unsigned(I >> 32)); } void FoldingSetNodeID::AddString(StringRef String) { unsigned Size = String.size(); Bits.push_back(Size); if (!Size) return; unsigned Units = Size / 4; unsigned Pos = 0; const unsigned *Base = (const unsigned*) String.data(); // If the string is aligned do a bulk transfer. if (!((intptr_t)Base & 3)) { Bits.append(Base, Base + Units); Pos = (Units + 1) * 4; } else { // Otherwise do it the hard way. // To be compatible with above bulk transfer, we need to take endianness // into account. 
static_assert(sys::IsBigEndianHost || sys::IsLittleEndianHost, "Unexpected host endianness"); if (sys::IsBigEndianHost) { for (Pos += 4; Pos <= Size; Pos += 4) { unsigned V = ((unsigned char)String[Pos - 4] << 24) | ((unsigned char)String[Pos - 3] << 16) | ((unsigned char)String[Pos - 2] << 8) | (unsigned char)String[Pos - 1]; Bits.push_back(V); } } else { // Little-endian host for (Pos += 4; Pos <= Size; Pos += 4) { unsigned V = ((unsigned char)String[Pos - 1] << 24) | ((unsigned char)String[Pos - 2] << 16) | ((unsigned char)String[Pos - 3] << 8) | (unsigned char)String[Pos - 4]; Bits.push_back(V); } } } // With the leftover bits. unsigned V = 0; // Pos will have overshot size by 4 - #bytes left over. // No need to take endianness into account here - this is always executed. switch (Pos - Size) { case 1: V = (V << 8) | (unsigned char)String[Size - 3]; // Fall thru. LLVM_FALLTHROUGH; // HLSL Change case 2: V = (V << 8) | (unsigned char)String[Size - 2]; // Fall thru. LLVM_FALLTHROUGH; // HLSL Change case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break; default: return; // Nothing left. } Bits.push_back(V); } // AddNodeID - Adds the Bit data of another ID to *this. void FoldingSetNodeID::AddNodeID(const FoldingSetNodeID &ID) { Bits.append(ID.Bits.begin(), ID.Bits.end()); } /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to /// lookup the node in the FoldingSetImpl. unsigned FoldingSetNodeID::ComputeHash() const { return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash(); } /// operator== - Used to compare two nodes to each other. /// bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS) const { return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); } /// operator== - Used to compare two nodes to each other. /// bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; } /// Used to compare the "ordering" of two nodes as defined by the /// profiled bits and their ordering defined by memcmp(). bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const { return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); } bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; } /// Intern - Copy this node's data to a memory region allocated from the /// given allocator and return a FoldingSetNodeIDRef describing the /// interned data. FoldingSetNodeIDRef FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const { unsigned *New = Allocator.Allocate<unsigned>(Bits.size()); std::uninitialized_copy(Bits.begin(), Bits.end(), New); return FoldingSetNodeIDRef(New, Bits.size()); } //===----------------------------------------------------------------------===// /// Helper functions for FoldingSetImpl. /// GetNextPtr - In order to save space, each bucket is a /// singly-linked-list. In order to make deletion more efficient, we make /// the list circular, so we can delete a node without computing its hash. /// The problem with this is that the start of the hash buckets are not /// Nodes. If NextInBucketPtr is a bucket pointer, this method returns null: /// use GetBucketPtr when this happens. static FoldingSetImpl::Node *GetNextPtr(void *NextInBucketPtr) { // The low bit is set if this is the pointer back to the bucket. if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1) return nullptr; return static_cast<FoldingSetImpl::Node*>(NextInBucketPtr); } /// testing. 
static void **GetBucketPtr(void *NextInBucketPtr) { intptr_t Ptr = reinterpret_cast<intptr_t>(NextInBucketPtr); assert((Ptr & 1) && "Not a bucket pointer"); return reinterpret_cast<void**>(Ptr & ~intptr_t(1)); } /// GetBucketFor - Hash the specified node ID and return the hash bucket for /// the specified ID. static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) { // NumBuckets is always a power of 2. unsigned BucketNum = Hash & (NumBuckets-1); return Buckets + BucketNum; } /// AllocateBuckets - Allocated initialized bucket memory. static void **AllocateBuckets(unsigned NumBuckets) { void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*))); if (Buckets == nullptr) throw std::bad_alloc(); // HLSL Change // Set the very last bucket to be a non-null "pointer". Buckets[NumBuckets] = reinterpret_cast<void*>(-1); return Buckets; } //===----------------------------------------------------------------------===// // FoldingSetImpl Implementation void FoldingSetImpl::anchor() {} FoldingSetImpl::FoldingSetImpl(unsigned Log2InitSize) { assert(5 < Log2InitSize && Log2InitSize < 32 && "Initial hash table size out of range"); NumBuckets = 1 << Log2InitSize; Buckets = AllocateBuckets(NumBuckets); NumNodes = 0; } FoldingSetImpl::~FoldingSetImpl() { free(Buckets); } void FoldingSetImpl::clear() { // Set all but the last bucket to null pointers. memset(Buckets, 0, NumBuckets*sizeof(void*)); // Set the very last bucket to be a non-null "pointer". Buckets[NumBuckets] = reinterpret_cast<void*>(-1); // Reset the node count to zero. NumNodes = 0; } /// GrowHashTable - Double the size of the hash table and rehash everything. /// void FoldingSetImpl::GrowHashTable() { void **OldBuckets = Buckets; unsigned OldNumBuckets = NumBuckets; NumBuckets <<= 1; // Clear out new buckets. Buckets = AllocateBuckets(NumBuckets); NumNodes = 0; // Walk the old buckets, rehashing nodes into their new place. FoldingSetNodeID TempID; for (unsigned i = 0; i != OldNumBuckets; ++i) { void *Probe = OldBuckets[i]; if (!Probe) continue; while (Node *NodeInBucket = GetNextPtr(Probe)) { // Figure out the next link, remove NodeInBucket from the old link. Probe = NodeInBucket->getNextInBucket(); NodeInBucket->SetNextInBucket(nullptr); // Insert the node into the new bucket, after recomputing the hash. InsertNode(NodeInBucket, GetBucketFor(ComputeNodeHash(NodeInBucket, TempID), Buckets, NumBuckets)); TempID.clear(); } } free(OldBuckets); } /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, /// return it. If not, return the insertion token that will make insertion /// faster. FoldingSetImpl::Node *FoldingSetImpl::FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) { unsigned IDHash = ID.ComputeHash(); void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets); void *Probe = *Bucket; InsertPos = nullptr; FoldingSetNodeID TempID; while (Node *NodeInBucket = GetNextPtr(Probe)) { if (NodeEquals(NodeInBucket, ID, IDHash, TempID)) return NodeInBucket; TempID.clear(); Probe = NodeInBucket->getNextInBucket(); } // Didn't find the node, return null with the bucket as the InsertPos. InsertPos = Bucket; return nullptr; } /// InsertNode - Insert the specified node into the folding set, knowing that it /// is not already in the map. InsertPos must be obtained from /// FindNodeOrInsertPos. void FoldingSetImpl::InsertNode(Node *N, void *InsertPos) { assert(!N->getNextInBucket()); // Do we need to grow the hashtable? 
if (NumNodes+1 > NumBuckets*2) { GrowHashTable(); FoldingSetNodeID TempID; InsertPos = GetBucketFor(ComputeNodeHash(N, TempID), Buckets, NumBuckets); } ++NumNodes; /// The insert position is actually a bucket pointer. void **Bucket = static_cast<void**>(InsertPos); void *Next = *Bucket; // If this is the first insertion into this bucket, its next pointer will be // null. Pretend as if it pointed to itself, setting the low bit to indicate // that it is a pointer to the bucket. if (!Next) Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1); // Set the node's next pointer, and make the bucket point to the node. N->SetNextInBucket(Next); *Bucket = N; } /// RemoveNode - Remove a node from the folding set, returning true if one was /// removed or false if the node was not in the folding set. bool FoldingSetImpl::RemoveNode(Node *N) { // Because each bucket is a circular list, we don't need to compute N's hash // to remove it. void *Ptr = N->getNextInBucket(); if (!Ptr) return false; // Not in folding set. --NumNodes; N->SetNextInBucket(nullptr); // Remember what N originally pointed to, either a bucket or another node. void *NodeNextPtr = Ptr; // Chase around the list until we find the node (or bucket) which points to N. while (true) { if (Node *NodeInBucket = GetNextPtr(Ptr)) { // Advance pointer. Ptr = NodeInBucket->getNextInBucket(); // We found a node that points to N, change it to point to N's next node, // removing N from the list. if (Ptr == N) { NodeInBucket->SetNextInBucket(NodeNextPtr); return true; } } else { void **Bucket = GetBucketPtr(Ptr); Ptr = *Bucket; // If we found that the bucket points to N, update the bucket to point to // whatever is next. if (Ptr == N) { *Bucket = NodeNextPtr; return true; } } } } /// GetOrInsertNode - If there is an existing simple Node exactly /// equal to the specified node, return it. Otherwise, insert 'N' and return it /// instead. FoldingSetImpl::Node *FoldingSetImpl::GetOrInsertNode(FoldingSetImpl::Node *N) { FoldingSetNodeID ID; GetNodeProfile(N, ID); void *IP; if (Node *E = FindNodeOrInsertPos(ID, IP)) return E; InsertNode(N, IP); return N; } //===----------------------------------------------------------------------===// // FoldingSetIteratorImpl Implementation FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) { // Skip to the first non-null non-self-cycle bucket. while (*Bucket != reinterpret_cast<void*>(-1) && (!*Bucket || !GetNextPtr(*Bucket))) ++Bucket; NodePtr = static_cast<FoldingSetNode*>(*Bucket); } void FoldingSetIteratorImpl::advance() { // If there is another link within this bucket, go to it. void *Probe = NodePtr->getNextInBucket(); if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe)) NodePtr = NextNodeInBucket; else { // Otherwise, this is the last link in this bucket. void **Bucket = GetBucketPtr(Probe); // Skip to the next non-null non-self-cycle bucket. do { ++Bucket; } while (*Bucket != reinterpret_cast<void*>(-1) && (!*Bucket || !GetNextPtr(*Bucket))); NodePtr = static_cast<FoldingSetNode*>(*Bucket); } } //===----------------------------------------------------------------------===// // FoldingSetBucketIteratorImpl Implementation FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) { Ptr = (!*Bucket || !GetNextPtr(*Bucket)) ? (void*) Bucket : *Bucket; }
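// --- Illustrative usage sketch (not part of the original file) ---
// The canonical FindNodeOrInsertPos/InsertNode pattern that the code above
// implements. MyNode and getOrCreate are hypothetical names.
#include "llvm/ADT/FoldingSet.h"

namespace {
struct MyNode : llvm::FoldingSetNode {
  unsigned Value;
  explicit MyNode(unsigned V) : Value(V) {}
  // Profile() must hash exactly the fields that define node identity.
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Value); }
};
} // namespace

MyNode *getOrCreate(llvm::FoldingSet<MyNode> &Set, unsigned V) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(V); // must mirror MyNode::Profile
  void *InsertPos = nullptr;
  if (MyNode *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
    return Existing; // uniqued: the same Value yields the same node
  MyNode *N = new MyNode(V);
  Set.InsertNode(N, InsertPos); // InsertPos came from the failed lookup
  return N;
}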
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/TargetRegistry.cpp
//===--- TargetRegistry.cpp - Target registration -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Support/TargetRegistry.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/raw_ostream.h" #include <cassert> #include <vector> using namespace llvm; // Clients are responsible for avoiding race conditions in registration. static Target *FirstTarget = nullptr; iterator_range<TargetRegistry::iterator> TargetRegistry::targets() { return make_range(iterator(FirstTarget), iterator()); } const Target *TargetRegistry::lookupTarget(const std::string &ArchName, Triple &TheTriple, std::string &Error) { // Allocate target machine. First, check whether the user has explicitly // specified an architecture to compile for. If so we have to look it up by // name, because it might be a backend that has no mapping to a target triple. const Target *TheTarget = nullptr; if (!ArchName.empty()) { auto I = std::find_if(targets().begin(), targets().end(), [&](const Target &T) { return ArchName == T.getName(); }); if (I == targets().end()) { Error = "error: invalid target '" + ArchName + "'.\n"; return nullptr; } TheTarget = &*I; // Adjust the triple to match (if known), otherwise stick with the // given triple. Triple::ArchType Type = Triple::getArchTypeForLLVMName(ArchName); if (Type != Triple::UnknownArch) TheTriple.setArch(Type); } else { // Get the target specific parser. std::string TempError; TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), TempError); if (!TheTarget) { Error = ": error: unable to get target for '" + TheTriple.getTriple() + "', see --version and --triple.\n"; return nullptr; } } return TheTarget; } const Target *TargetRegistry::lookupTarget(const std::string &TT, std::string &Error) { // Provide special warning when no targets are initialized. if (targets().begin() == targets().end()) { Error = "Unable to find target for this triple (no targets are registered)"; return nullptr; } Triple::ArchType Arch = Triple(TT).getArch(); auto ArchMatch = [&](const Target &T) { return T.ArchMatchFn(Arch); }; auto I = std::find_if(targets().begin(), targets().end(), ArchMatch); if (I == targets().end()) { Error = "No available targets are compatible with this triple, " "see -version for the available targets."; return nullptr; } auto J = std::find_if(std::next(I), targets().end(), ArchMatch); if (J != targets().end()) { Error = std::string("Cannot choose between targets \"") + I->Name + "\" and \"" + J->Name + "\""; return nullptr; } return &*I; } void TargetRegistry::RegisterTarget(Target &T, const char *Name, const char *ShortDesc, Target::ArchMatchFnTy ArchMatchFn, bool HasJIT) { assert(Name && ShortDesc && ArchMatchFn && "Missing required target information!"); // Check if this target has already been initialized, we allow this as a // convenience to some clients. if (T.Name) return; // Add to the list of targets. 
T.Next = FirstTarget; FirstTarget = &T; T.Name = Name; T.ShortDesc = ShortDesc; T.ArchMatchFn = ArchMatchFn; T.HasJIT = HasJIT; } // HLSL Change: changed calling convention to __cdecl static int __cdecl TargetArraySortFn(const std::pair<StringRef, const Target *> *LHS, const std::pair<StringRef, const Target *> *RHS) { return LHS->first.compare(RHS->first); } void TargetRegistry::printRegisteredTargetsForVersion() { std::vector<std::pair<StringRef, const Target*> > Targets; size_t Width = 0; for (const auto &T : TargetRegistry::targets()) { Targets.push_back(std::make_pair(T.getName(), &T)); Width = std::max(Width, Targets.back().first.size()); } array_pod_sort(Targets.begin(), Targets.end(), TargetArraySortFn); raw_ostream &OS = outs(); OS << " Registered Targets:\n"; for (unsigned i = 0, e = Targets.size(); i != e; ++i) { OS << " " << Targets[i].first; OS.indent(Width - Targets[i].first.size()) << " - " << Targets[i].second->getShortDescription() << '\n'; } if (Targets.empty()) OS << " (none)\n"; }
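// --- Illustrative usage sketch (not part of the original file) ---
// Typical lookup flow, assuming some backend was registered earlier (for
// example via RegisterTarget from that backend's TargetInfo library); the
// triple string is a placeholder.
#include "llvm/ADT/Triple.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

const llvm::Target *demoLookup() {
  using namespace llvm;
  std::string Error;
  Triple TheTriple("x86_64-unknown-linux-gnu"); // placeholder triple
  // An empty ArchName means "resolve purely from the triple".
  const Target *T = TargetRegistry::lookupTarget("", TheTriple, Error);
  if (!T)
    errs() << Error << "\n"; // e.g. "... no targets are registered"
  return T;
}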
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/LEB128.cpp
//===- LEB128.cpp - LEB128 utility functions implementation -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements some utility functions for encoding SLEB128 and // ULEB128 values. // //===----------------------------------------------------------------------===// #include "llvm/Support/LEB128.h" namespace llvm { /// Utility function to get the size of the ULEB128-encoded value. unsigned getULEB128Size(uint64_t Value) { unsigned Size = 0; do { Value >>= 7; Size += sizeof(int8_t); } while (Value); return Size; } /// Utility function to get the size of the SLEB128-encoded value. unsigned getSLEB128Size(int64_t Value) { unsigned Size = 0; int Sign = Value >> (8 * sizeof(Value) - 1); bool IsMore; do { unsigned Byte = Value & 0x7f; Value >>= 7; IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0; Size += sizeof(int8_t); } while (IsMore); return Size; } } // namespace llvm
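// --- Illustrative usage sketch (not part of the original file) ---
// The sizes computed above match the encoders and decoder declared in
// LEB128.h. 624485 is the classic DWARF example value: 0xE5 0x8E 0x26.
#include "llvm/Support/LEB128.h"
#include <cassert>
#include <cstdint>

void demoLEB128() {
  uint8_t Buf[10];
  unsigned Len = llvm::encodeULEB128(624485, Buf); // writes E5 8E 26
  assert(Len == 3 && Len == llvm::getULEB128Size(624485));
  // SLEB128 packs -1 into the single byte 0x7F.
  assert(llvm::getSLEB128Size(-1) == 1);
  unsigned N;
  assert(llvm::decodeULEB128(Buf, &N) == 624485 && N == 3);
}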
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/CrashRecoveryContext.cpp
//===--- CrashRecoveryContext.cpp - Crash Recovery ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Support/CrashRecoveryContext.h" #include "llvm/Config/config.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Mutex.h" #include "llvm/Support/ThreadLocal.h" #include <setjmp.h> using namespace llvm; namespace { struct CrashRecoveryContextImpl; static ManagedStatic< sys::ThreadLocal<const CrashRecoveryContextImpl> > CurrentContext; struct CrashRecoveryContextImpl { CrashRecoveryContext *CRC; std::string Backtrace; ::jmp_buf JumpBuffer; volatile unsigned Failed : 1; unsigned SwitchedThread : 1; public: CrashRecoveryContextImpl(CrashRecoveryContext *CRC) : CRC(CRC), Failed(false), SwitchedThread(false) { CurrentContext->set(this); } ~CrashRecoveryContextImpl() { if (!SwitchedThread) CurrentContext->erase(); } /// \brief Called when the separate crash-recovery thread was finished, to /// indicate that we don't need to clear the thread-local CurrentContext. void setSwitchedThread() { SwitchedThread = true; } void HandleCrash() { // Eliminate the current context entry, to avoid re-entering in case the // cleanup code crashes. CurrentContext->erase(); assert(!Failed && "Crash recovery context already failed!"); Failed = true; // FIXME: Stash the backtrace. // Jump back to the RunSafely we were called under. longjmp(JumpBuffer, 1); } }; } static ManagedStatic<sys::Mutex> gCrashRecoveryContextMutex; static bool gCrashRecoveryEnabled = false; static ManagedStatic<sys::ThreadLocal<const CrashRecoveryContextCleanup> > tlIsRecoveringFromCrash; CrashRecoveryContextCleanup::~CrashRecoveryContextCleanup() {} CrashRecoveryContext::~CrashRecoveryContext() { // Reclaim registered resources. CrashRecoveryContextCleanup *i = head; tlIsRecoveringFromCrash->set(head); while (i) { CrashRecoveryContextCleanup *tmp = i; i = tmp->next; tmp->cleanupFired = true; tmp->recoverResources(); delete tmp; } tlIsRecoveringFromCrash->erase(); CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl; delete CRCI; } bool CrashRecoveryContext::isRecoveringFromCrash() { return tlIsRecoveringFromCrash->get() != nullptr; } CrashRecoveryContext *CrashRecoveryContext::GetCurrent() { if (!gCrashRecoveryEnabled) return nullptr; const CrashRecoveryContextImpl *CRCI = CurrentContext->get(); if (!CRCI) return nullptr; return CRCI->CRC; } void CrashRecoveryContext::registerCleanup(CrashRecoveryContextCleanup *cleanup) { if (!cleanup) return; if (head) head->prev = cleanup; cleanup->next = head; head = cleanup; } void CrashRecoveryContext::unregisterCleanup(CrashRecoveryContextCleanup *cleanup) { if (!cleanup) return; if (cleanup == head) { head = cleanup->next; if (head) head->prev = nullptr; } else { cleanup->prev->next = cleanup->next; if (cleanup->next) cleanup->next->prev = cleanup->prev; } delete cleanup; } #ifdef LLVM_ON_WIN32 #include "Windows/WindowsSupport.h" // On Windows, we can make use of vectored exception handling to // catch most crashing situations. Note that this does mean // we will be alerted of exceptions *before* structured exception // handling has the opportunity to catch it. But that isn't likely // to cause problems because nowhere in the project is SEH being // used. 
// // Vectored exception handling is built on top of SEH, and so it // works on a per-thread basis. // // The vectored exception handler functionality was added in Windows // XP, so if support for older versions of Windows is required, // it will have to be added. // // If we want to support as far back as Win2k, we could use the // SetUnhandledExceptionFilter API, but there's a risk of that // being entirely overwritten (it's not a chain). static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo) { // Lookup the current thread local recovery object. const CrashRecoveryContextImpl *CRCI = CurrentContext->get(); if (!CRCI) { // Something has gone horribly wrong, so let's just tell everyone // to keep searching CrashRecoveryContext::Disable(); return EXCEPTION_CONTINUE_SEARCH; } // TODO: We can capture the stack backtrace here and store it on the // implementation if we so choose. // Handle the crash const_cast<CrashRecoveryContextImpl*>(CRCI)->HandleCrash(); // Note that we don't actually get here because HandleCrash calls // longjmp, which means the HandleCrash function never returns. llvm_unreachable("Handled the crash, should have longjmp'ed out of here"); } // Because the Enable and Disable calls are static, it means that // there may not actually be an Impl available, or even a current // CrashRecoveryContext at all. So we make use of a thread-local // exception table. The handles contained in here will either be // non-NULL, valid VEH handles, or NULL. static sys::ThreadLocal<const void> sCurrentExceptionHandle; void CrashRecoveryContext::Enable() { sys::ScopedLock L(*gCrashRecoveryContextMutex); if (gCrashRecoveryEnabled) return; gCrashRecoveryEnabled = true; // We can set up vectored exception handling now. We will install our // handler as the front of the list, though there's no assurances that // it will remain at the front (another call could install itself before // our handler). This 1) isn't likely, and 2) shouldn't cause problems. PVOID handle = ::AddVectoredExceptionHandler(1, ExceptionHandler); sCurrentExceptionHandle.set(handle); } void CrashRecoveryContext::Disable() { sys::ScopedLock L(*gCrashRecoveryContextMutex); if (!gCrashRecoveryEnabled) return; gCrashRecoveryEnabled = false; PVOID currentHandle = const_cast<PVOID>(sCurrentExceptionHandle.get()); if (currentHandle) { // Now we can remove the vectored exception handler from the chain ::RemoveVectoredExceptionHandler(currentHandle); // Reset the handle in our thread-local set. sCurrentExceptionHandle.set(NULL); } } #else // Generic POSIX implementation. // // This implementation relies on synchronous signals being delivered to the // current thread. We use a thread local object to keep track of the active // crash recovery context, and install signal handlers to invoke HandleCrash on // the active object. // // This implementation does not to attempt to chain signal handlers in any // reliable fashion -- if we get a signal outside of a crash recovery context we // simply disable crash recovery and raise the signal again. #include <signal.h> static const int Signals[] = { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP }; static const unsigned NumSignals = sizeof(Signals) / sizeof(Signals[0]); static struct sigaction PrevActions[NumSignals]; static void CrashRecoverySignalHandler(int Signal) { // Lookup the current thread local recovery object. 
const CrashRecoveryContextImpl *CRCI = CurrentContext->get(); if (!CRCI) { // We didn't find a crash recovery context -- this means either we got a // signal on a thread we didn't expect it on, the application got a signal // outside of a crash recovery context, or something else went horribly // wrong. // // Disable crash recovery and raise the signal again. The assumption here is // that the enclosing application will terminate soon, and we won't want to // attempt crash recovery again. // // This call of Disable isn't thread safe, but it doesn't actually matter. CrashRecoveryContext::Disable(); raise(Signal); // The signal will be thrown once the signal mask is restored. return; } // Unblock the signal we received. sigset_t SigMask; sigemptyset(&SigMask); sigaddset(&SigMask, Signal); sigprocmask(SIG_UNBLOCK, &SigMask, nullptr); if (CRCI) const_cast<CrashRecoveryContextImpl*>(CRCI)->HandleCrash(); } void CrashRecoveryContext::Enable() { sys::ScopedLock L(*gCrashRecoveryContextMutex); if (gCrashRecoveryEnabled) return; gCrashRecoveryEnabled = true; // Setup the signal handler. struct sigaction Handler; Handler.sa_handler = CrashRecoverySignalHandler; Handler.sa_flags = 0; sigemptyset(&Handler.sa_mask); for (unsigned i = 0; i != NumSignals; ++i) { sigaction(Signals[i], &Handler, &PrevActions[i]); } } void CrashRecoveryContext::Disable() { sys::ScopedLock L(*gCrashRecoveryContextMutex); if (!gCrashRecoveryEnabled) return; gCrashRecoveryEnabled = false; // Restore the previous signal handlers. for (unsigned i = 0; i != NumSignals; ++i) sigaction(Signals[i], &PrevActions[i], nullptr); } #endif bool CrashRecoveryContext::RunSafely(function_ref<void()> Fn) { // If crash recovery is disabled, do nothing. if (gCrashRecoveryEnabled) { assert(!Impl && "Crash recovery context already initialized!"); CrashRecoveryContextImpl *CRCI = new CrashRecoveryContextImpl(this); Impl = CRCI; if (setjmp(CRCI->JumpBuffer) != 0) { return false; } } Fn(); return true; } void CrashRecoveryContext::HandleCrash() { CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl; assert(CRCI && "Crash recovery context never initialized!"); CRCI->HandleCrash(); } const std::string &CrashRecoveryContext::getBacktrace() const { CrashRecoveryContextImpl *CRC = (CrashRecoveryContextImpl *) Impl; assert(CRC && "Crash recovery context never initialized!"); assert(CRC->Failed && "No crash was detected!"); return CRC->Backtrace; } // FIXME: Portability. static void setThreadBackgroundPriority() { #ifdef __APPLE__ setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG); #endif } static bool hasThreadBackgroundPriority() { #ifdef __APPLE__ return getpriority(PRIO_DARWIN_THREAD, 0) == 1; #else return false; #endif } namespace { struct RunSafelyOnThreadInfo { function_ref<void()> Fn; CrashRecoveryContext *CRC; bool UseBackgroundPriority; bool Result; }; } static void RunSafelyOnThread_Dispatch(void *UserData) { RunSafelyOnThreadInfo *Info = reinterpret_cast<RunSafelyOnThreadInfo*>(UserData); if (Info->UseBackgroundPriority) setThreadBackgroundPriority(); Info->Result = Info->CRC->RunSafely(Info->Fn); } bool CrashRecoveryContext::RunSafelyOnThread(function_ref<void()> Fn, unsigned RequestedStackSize) { bool UseBackgroundPriority = hasThreadBackgroundPriority(); RunSafelyOnThreadInfo Info = { Fn, this, UseBackgroundPriority, false }; llvm_execute_on_thread(RunSafelyOnThread_Dispatch, &Info, RequestedStackSize); if (CrashRecoveryContextImpl *CRC = (CrashRecoveryContextImpl *)Impl) CRC->setSwitchedThread(); return Info.Result; }
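// --- Illustrative usage sketch (not part of the original file) ---
// A crash inside RunSafely is converted into a 'false' return instead of
// taking down the process, provided Enable() installed the handlers first.
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/raw_ostream.h"

bool demoRunSafely() {
  llvm::CrashRecoveryContext::Enable();
  llvm::CrashRecoveryContext CRC;
  bool Ok = CRC.RunSafely([] {
    volatile int *P = nullptr;
    *P = 42; // deliberate fault, caught by the context's handlers
  });
  if (!Ok)
    llvm::errs() << "recovered from crash\n";
  llvm::CrashRecoveryContext::Disable();
  return Ok;
}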
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/ARMBuildAttrs.cpp
//===-- ARMBuildAttrs.cpp - ARM Build Attributes --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Support/ARMBuildAttributes.h" #include "llvm/ADT/StringRef.h" using namespace llvm; namespace { const struct { ARMBuildAttrs::AttrType Attr; const char *TagName; } ARMAttributeTags[] = { { ARMBuildAttrs::File, "Tag_File" }, { ARMBuildAttrs::Section, "Tag_Section" }, { ARMBuildAttrs::Symbol, "Tag_Symbol" }, { ARMBuildAttrs::CPU_raw_name, "Tag_CPU_raw_name" }, { ARMBuildAttrs::CPU_name, "Tag_CPU_name" }, { ARMBuildAttrs::CPU_arch, "Tag_CPU_arch" }, { ARMBuildAttrs::CPU_arch_profile, "Tag_CPU_arch_profile" }, { ARMBuildAttrs::ARM_ISA_use, "Tag_ARM_ISA_use" }, { ARMBuildAttrs::THUMB_ISA_use, "Tag_THUMB_ISA_use" }, { ARMBuildAttrs::FP_arch, "Tag_FP_arch" }, { ARMBuildAttrs::WMMX_arch, "Tag_WMMX_arch" }, { ARMBuildAttrs::Advanced_SIMD_arch, "Tag_Advanced_SIMD_arch" }, { ARMBuildAttrs::PCS_config, "Tag_PCS_config" }, { ARMBuildAttrs::ABI_PCS_R9_use, "Tag_ABI_PCS_R9_use" }, { ARMBuildAttrs::ABI_PCS_RW_data, "Tag_ABI_PCS_RW_data" }, { ARMBuildAttrs::ABI_PCS_RO_data, "Tag_ABI_PCS_RO_data" }, { ARMBuildAttrs::ABI_PCS_GOT_use, "Tag_ABI_PCS_GOT_use" }, { ARMBuildAttrs::ABI_PCS_wchar_t, "Tag_ABI_PCS_wchar_t" }, { ARMBuildAttrs::ABI_FP_rounding, "Tag_ABI_FP_rounding" }, { ARMBuildAttrs::ABI_FP_denormal, "Tag_ABI_FP_denormal" }, { ARMBuildAttrs::ABI_FP_exceptions, "Tag_ABI_FP_exceptions" }, { ARMBuildAttrs::ABI_FP_user_exceptions, "Tag_ABI_FP_user_exceptions" }, { ARMBuildAttrs::ABI_FP_number_model, "Tag_ABI_FP_number_model" }, { ARMBuildAttrs::ABI_align_needed, "Tag_ABI_align_needed" }, { ARMBuildAttrs::ABI_align_preserved, "Tag_ABI_align_preserved" }, { ARMBuildAttrs::ABI_enum_size, "Tag_ABI_enum_size" }, { ARMBuildAttrs::ABI_HardFP_use, "Tag_ABI_HardFP_use" }, { ARMBuildAttrs::ABI_VFP_args, "Tag_ABI_VFP_args" }, { ARMBuildAttrs::ABI_WMMX_args, "Tag_ABI_WMMX_args" }, { ARMBuildAttrs::ABI_optimization_goals, "Tag_ABI_optimization_goals" }, { ARMBuildAttrs::ABI_FP_optimization_goals, "Tag_ABI_FP_optimization_goals" }, { ARMBuildAttrs::compatibility, "Tag_compatibility" }, { ARMBuildAttrs::CPU_unaligned_access, "Tag_CPU_unaligned_access" }, { ARMBuildAttrs::FP_HP_extension, "Tag_FP_HP_extension" }, { ARMBuildAttrs::ABI_FP_16bit_format, "Tag_ABI_FP_16bit_format" }, { ARMBuildAttrs::MPextension_use, "Tag_MPextension_use" }, { ARMBuildAttrs::DIV_use, "Tag_DIV_use" }, { ARMBuildAttrs::nodefaults, "Tag_nodefaults" }, { ARMBuildAttrs::also_compatible_with, "Tag_also_compatible_with" }, { ARMBuildAttrs::T2EE_use, "Tag_T2EE_use" }, { ARMBuildAttrs::conformance, "Tag_conformance" }, { ARMBuildAttrs::Virtualization_use, "Tag_Virtualization_use" }, // Legacy Names { ARMBuildAttrs::FP_arch, "Tag_VFP_arch" }, { ARMBuildAttrs::FP_HP_extension, "Tag_VFP_HP_extension" }, { ARMBuildAttrs::ABI_align_needed, "Tag_ABI_align8_needed" }, { ARMBuildAttrs::ABI_align_preserved, "Tag_ABI_align8_preserved" }, }; } namespace llvm { namespace ARMBuildAttrs { StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix) { return AttrTypeAsString(static_cast<AttrType>(Attr), HasTagPrefix); } StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix) { for (unsigned TI = 0, TE = sizeof(ARMAttributeTags) / sizeof(*ARMAttributeTags); TI != TE; ++TI) if (ARMAttributeTags[TI].Attr == Attr) return 
ARMAttributeTags[TI].TagName + (HasTagPrefix ? 0 : 4); return ""; } int AttrTypeFromString(StringRef Tag) { bool HasTagPrefix = Tag.startswith("Tag_"); for (unsigned TI = 0, TE = sizeof(ARMAttributeTags) / sizeof(*ARMAttributeTags); TI != TE; ++TI) if (StringRef(ARMAttributeTags[TI].TagName + (HasTagPrefix ? 0 : 4)) == Tag) return ARMAttributeTags[TI].Attr; return -1; } } }
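// --- Illustrative usage sketch (not part of the original file) ---
// The table above supports both directions; the "Tag_" prefix is optional on
// input and controlled by HasTagPrefix on output. Tag_CPU_arch is tag 6.
#include "llvm/Support/ARMBuildAttributes.h"
#include <cassert>

void demoAttrNames() {
  using namespace llvm::ARMBuildAttrs;
  assert(AttrTypeAsString(CPU_arch, /*HasTagPrefix=*/true) == "Tag_CPU_arch");
  assert(AttrTypeAsString(CPU_arch, /*HasTagPrefix=*/false) == "CPU_arch");
  assert(AttrTypeFromString("Tag_CPU_arch") == CPU_arch);
  assert(AttrTypeFromString("CPU_arch") == CPU_arch); // prefix optional
}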
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Mutex.cpp
//===- Mutex.cpp - Mutual Exclusion Lock ------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the llvm::sys::Mutex class. // //===----------------------------------------------------------------------===// #include "llvm/Config/config.h" #include "llvm/Support/Mutex.h" //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only TRULY operating system //=== independent code. //===----------------------------------------------------------------------===// #if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0 // Define all methods as no-ops if threading is explicitly disabled namespace llvm { using namespace sys; MutexImpl::MutexImpl( bool recursive) { } MutexImpl::~MutexImpl() { } bool MutexImpl::acquire() { return true; } bool MutexImpl::release() { return true; } bool MutexImpl::tryacquire() { return true; } } #else #if defined(HAVE_PTHREAD_H) && defined(HAVE_PTHREAD_MUTEX_LOCK) #include <cassert> #include <pthread.h> #include <stdlib.h> namespace llvm { using namespace sys; // Construct a Mutex using pthread calls MutexImpl::MutexImpl( bool recursive) : data_(nullptr) { // Declare the pthread_mutex data structures pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t))); pthread_mutexattr_t attr; // Initialize the mutex attributes int errorcode = pthread_mutexattr_init(&attr); assert(errorcode == 0); (void)errorcode; // Initialize the mutex as a recursive mutex, if requested, or normal // otherwise. int kind = ( recursive ? PTHREAD_MUTEX_RECURSIVE : PTHREAD_MUTEX_NORMAL ); errorcode = pthread_mutexattr_settype(&attr, kind); assert(errorcode == 0); // Initialize the mutex errorcode = pthread_mutex_init(mutex, &attr); assert(errorcode == 0); // Destroy the attributes errorcode = pthread_mutexattr_destroy(&attr); assert(errorcode == 0); // Assign the data member data_ = mutex; } // Destruct a Mutex MutexImpl::~MutexImpl() { pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_); assert(mutex != nullptr); pthread_mutex_destroy(mutex); free(mutex); } bool MutexImpl::acquire() { pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_); assert(mutex != nullptr); int errorcode = pthread_mutex_lock(mutex); return errorcode == 0; } bool MutexImpl::release() { pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_); assert(mutex != nullptr); int errorcode = pthread_mutex_unlock(mutex); return errorcode == 0; } bool MutexImpl::tryacquire() { pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_); assert(mutex != nullptr); int errorcode = pthread_mutex_trylock(mutex); return errorcode == 0; } } #elif defined(LLVM_ON_UNIX) #include "Unix/Mutex.inc" #elif defined( LLVM_ON_WIN32) #include "Windows/Mutex.inc" #else #warning Neither LLVM_ON_UNIX nor LLVM_ON_WIN32 was set in Support/Mutex.cpp #endif #endif
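// --- Illustrative usage sketch (not part of the original file) ---
// sys::Mutex is the SmartMutex<false> typedef built on the implementation
// above; ScopedLock is an RAII guard that releases on every exit path.
#include "llvm/Support/Mutex.h"

static llvm::sys::Mutex CounterLock;
static unsigned Counter = 0;

unsigned bumpCounter() {
  // acquire() in the constructor, release() in the destructor, so early
  // returns and exceptions cannot leak the lock.
  llvm::sys::ScopedLock Guard(CounterLock);
  return ++Counter;
}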
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/YAMLParser.cpp
//===--- YAMLParser.cpp - Simple YAML parser ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a YAML parser. // //===----------------------------------------------------------------------===// #include "llvm/Support/YAMLParser.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace yaml; enum UnicodeEncodingForm { UEF_UTF32_LE, ///< UTF-32 Little Endian UEF_UTF32_BE, ///< UTF-32 Big Endian UEF_UTF16_LE, ///< UTF-16 Little Endian UEF_UTF16_BE, ///< UTF-16 Big Endian UEF_UTF8, ///< UTF-8 or ascii. UEF_Unknown ///< Not a valid Unicode encoding. }; /// EncodingInfo - Holds the encoding type and length of the byte order mark if /// it exists. Length is in {0, 2, 3, 4}. typedef std::pair<UnicodeEncodingForm, unsigned> EncodingInfo; /// getUnicodeEncoding - Reads up to the first 4 bytes to determine the Unicode /// encoding form of \a Input. /// /// @param Input A string of length 0 or more. /// @returns An EncodingInfo indicating the Unicode encoding form of the input /// and how long the byte order mark is if one exists. static EncodingInfo getUnicodeEncoding(StringRef Input) { if (Input.size() == 0) return std::make_pair(UEF_Unknown, 0); switch (uint8_t(Input[0])) { case 0x00: if (Input.size() >= 4) { if ( Input[1] == 0 && uint8_t(Input[2]) == 0xFE && uint8_t(Input[3]) == 0xFF) return std::make_pair(UEF_UTF32_BE, 4); if (Input[1] == 0 && Input[2] == 0 && Input[3] != 0) return std::make_pair(UEF_UTF32_BE, 0); } if (Input.size() >= 2 && Input[1] != 0) return std::make_pair(UEF_UTF16_BE, 0); return std::make_pair(UEF_Unknown, 0); case 0xFF: if ( Input.size() >= 4 && uint8_t(Input[1]) == 0xFE && Input[2] == 0 && Input[3] == 0) return std::make_pair(UEF_UTF32_LE, 4); if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFE) return std::make_pair(UEF_UTF16_LE, 2); return std::make_pair(UEF_Unknown, 0); case 0xFE: if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFF) return std::make_pair(UEF_UTF16_BE, 2); return std::make_pair(UEF_Unknown, 0); case 0xEF: if ( Input.size() >= 3 && uint8_t(Input[1]) == 0xBB && uint8_t(Input[2]) == 0xBF) return std::make_pair(UEF_UTF8, 3); return std::make_pair(UEF_Unknown, 0); } // It could still be utf-32 or utf-16. if (Input.size() >= 4 && Input[1] == 0 && Input[2] == 0 && Input[3] == 0) return std::make_pair(UEF_UTF32_LE, 0); if (Input.size() >= 2 && Input[1] == 0) return std::make_pair(UEF_UTF16_LE, 0); return std::make_pair(UEF_UTF8, 0); } namespace llvm { namespace yaml { /// Pin the vtables to this file. void Node::anchor() {} void NullNode::anchor() {} void ScalarNode::anchor() {} void BlockScalarNode::anchor() {} void KeyValueNode::anchor() {} void MappingNode::anchor() {} void SequenceNode::anchor() {} void AliasNode::anchor() {} /// Token - A single YAML token. struct Token : ilist_node<Token> { enum TokenKind { TK_Error, // Uninitialized token. 
TK_StreamStart, TK_StreamEnd, TK_VersionDirective, TK_TagDirective, TK_DocumentStart, TK_DocumentEnd, TK_BlockEntry, TK_BlockEnd, TK_BlockSequenceStart, TK_BlockMappingStart, TK_FlowEntry, TK_FlowSequenceStart, TK_FlowSequenceEnd, TK_FlowMappingStart, TK_FlowMappingEnd, TK_Key, TK_Value, TK_Scalar, TK_BlockScalar, TK_Alias, TK_Anchor, TK_Tag } Kind; /// A string of length 0 or more whose begin() points to the logical location /// of the token in the input. StringRef Range; /// The value of a block scalar node. std::string Value; Token() : Kind(TK_Error) {} }; } } namespace llvm { template<> struct ilist_sentinel_traits<Token> { Token *createSentinel() const { return &Sentinel; } static void destroySentinel(Token*) {} Token *provideInitialHead() const { return createSentinel(); } Token *ensureHead(Token*) const { return createSentinel(); } static void noteHead(Token*, Token*) {} private: mutable Token Sentinel; }; template<> struct ilist_node_traits<Token> { Token *createNode(const Token &V) { return new (Alloc.Allocate<Token>()) Token(V); } static void deleteNode(Token *V) { V->~Token(); } void addNodeToList(Token *) {} void removeNodeFromList(Token *) {} void transferNodesFromList(ilist_node_traits & /*SrcTraits*/, ilist_iterator<Token> /*first*/, ilist_iterator<Token> /*last*/) {} BumpPtrAllocator Alloc; }; } typedef ilist<Token> TokenQueueT; namespace { /// @brief This struct is used to track simple keys. /// /// Simple keys are handled by creating an entry in SimpleKeys for each Token /// which could legally be the start of a simple key. When peekNext is called, /// if the Token To be returned is referenced by a SimpleKey, we continue /// tokenizing until that potential simple key has either been found to not be /// a simple key (we moved on to the next line or went further than 1024 chars). /// Or when we run into a Value, and then insert a Key token (and possibly /// others) before the SimpleKey's Tok. struct SimpleKey { TokenQueueT::iterator Tok; unsigned Column; unsigned Line; unsigned FlowLevel; bool IsRequired; bool operator ==(const SimpleKey &Other) { return Tok == Other.Tok; } }; } /// @brief The Unicode scalar value of a UTF-8 minimal well-formed code unit /// subsequence and the subsequence's length in code units (uint8_t). /// A length of 0 represents an error. typedef std::pair<uint32_t, unsigned> UTF8Decoded; static UTF8Decoded decodeUTF8(StringRef Range) { StringRef::iterator Position= Range.begin(); StringRef::iterator End = Range.end(); // 1 byte: [0x00, 0x7f] // Bit pattern: 0xxxxxxx if ((*Position & 0x80) == 0) { return std::make_pair(*Position, 1); } // 2 bytes: [0x80, 0x7ff] // Bit pattern: 110xxxxx 10xxxxxx if (Position + 1 != End && ((*Position & 0xE0) == 0xC0) && ((*(Position + 1) & 0xC0) == 0x80)) { uint32_t codepoint = ((*Position & 0x1F) << 6) | (*(Position + 1) & 0x3F); if (codepoint >= 0x80) return std::make_pair(codepoint, 2); } // 3 bytes: [0x8000, 0xffff] // Bit pattern: 1110xxxx 10xxxxxx 10xxxxxx if (Position + 2 != End && ((*Position & 0xF0) == 0xE0) && ((*(Position + 1) & 0xC0) == 0x80) && ((*(Position + 2) & 0xC0) == 0x80)) { uint32_t codepoint = ((*Position & 0x0F) << 12) | ((*(Position + 1) & 0x3F) << 6) | (*(Position + 2) & 0x3F); // Codepoints between 0xD800 and 0xDFFF are invalid, as // they are high / low surrogate halves used by UTF-16. 
if (codepoint >= 0x800 && (codepoint < 0xD800 || codepoint > 0xDFFF)) return std::make_pair(codepoint, 3); } // 4 bytes: [0x10000, 0x10FFFF] // Bit pattern: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx if (Position + 3 != End && ((*Position & 0xF8) == 0xF0) && ((*(Position + 1) & 0xC0) == 0x80) && ((*(Position + 2) & 0xC0) == 0x80) && ((*(Position + 3) & 0xC0) == 0x80)) { uint32_t codepoint = ((*Position & 0x07) << 18) | ((*(Position + 1) & 0x3F) << 12) | ((*(Position + 2) & 0x3F) << 6) | (*(Position + 3) & 0x3F); if (codepoint >= 0x10000 && codepoint <= 0x10FFFF) return std::make_pair(codepoint, 4); } return std::make_pair(0, 0); } namespace llvm { namespace yaml { /// @brief Scans YAML tokens from a MemoryBuffer. class Scanner { public: Scanner(StringRef Input, SourceMgr &SM, bool ShowColors = true); Scanner(MemoryBufferRef Buffer, SourceMgr &SM_, bool ShowColors = true); /// @brief Parse the next token and return it without popping it. Token &peekNext(); /// @brief Parse the next token and pop it from the queue. Token getNext(); void printError(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Message, ArrayRef<SMRange> Ranges = None) { SM.PrintMessage(Loc, Kind, Message, Ranges, /* FixIts= */ None, ShowColors); } void setError(const Twine &Message, StringRef::iterator Position) { if (Current >= End) Current = End - 1; // Don't print out more errors after the first one we encounter. The rest // are just the result of the first, and have no meaning. if (!Failed) printError(SMLoc::getFromPointer(Current), SourceMgr::DK_Error, Message); Failed = true; } void setError(const Twine &Message) { setError(Message, Current); } /// @brief Returns true if an error occurred while parsing. bool failed() { return Failed; } private: void init(MemoryBufferRef Buffer); StringRef currentInput() { return StringRef(Current, End - Current); } /// @brief Decode a UTF-8 minimal well-formed code unit subsequence starting /// at \a Position. /// /// If the UTF-8 code units starting at Position do not form a well-formed /// code unit subsequence, then the Unicode scalar value is 0, and the length /// is 0. UTF8Decoded decodeUTF8(StringRef::iterator Position) { return ::decodeUTF8(StringRef(Position, End - Position)); } // The following functions are based on the gramar rules in the YAML spec. The // style of the function names it meant to closely match how they are written // in the spec. The number within the [] is the number of the grammar rule in // the spec. // // See 4.2 [Production Naming Conventions] for the meaning of the prefixes. // // c- // A production starting and ending with a special character. // b- // A production matching a single line break. // nb- // A production starting and ending with a non-break character. // s- // A production starting and ending with a white space character. // ns- // A production starting and ending with a non-space character. // l- // A production matching complete line(s). /// @brief Skip a single nb-char[27] starting at Position. /// /// A nb-char is 0x9 | [0x20-0x7E] | 0x85 | [0xA0-0xD7FF] | [0xE000-0xFEFE] /// | [0xFF00-0xFFFD] | [0x10000-0x10FFFF] /// /// @returns The code unit after the nb-char, or Position if it's not an /// nb-char. StringRef::iterator skip_nb_char(StringRef::iterator Position); /// @brief Skip a single b-break[28] starting at Position. /// /// A b-break is 0xD 0xA | 0xD | 0xA /// /// @returns The code unit after the b-break, or Position if it's not a /// b-break. 
StringRef::iterator skip_b_break(StringRef::iterator Position); /// Skip a single s-space[31] starting at Position. /// /// An s-space is 0x20 /// /// @returns The code unit after the s-space, or Position if it's not a /// s-space. StringRef::iterator skip_s_space(StringRef::iterator Position); /// @brief Skip a single s-white[33] starting at Position. /// /// A s-white is 0x20 | 0x9 /// /// @returns The code unit after the s-white, or Position if it's not a /// s-white. StringRef::iterator skip_s_white(StringRef::iterator Position); /// @brief Skip a single ns-char[34] starting at Position. /// /// A ns-char is nb-char - s-white /// /// @returns The code unit after the ns-char, or Position if it's not a /// ns-char. StringRef::iterator skip_ns_char(StringRef::iterator Position); typedef StringRef::iterator (Scanner::*SkipWhileFunc)(StringRef::iterator); /// @brief Skip minimal well-formed code unit subsequences until Func /// returns its input. /// /// @returns The code unit after the last minimal well-formed code unit /// subsequence that Func accepted. StringRef::iterator skip_while( SkipWhileFunc Func , StringRef::iterator Position); /// Skip minimal well-formed code unit subsequences until Func returns its /// input. void advanceWhile(SkipWhileFunc Func); /// @brief Scan ns-uri-char[39]s starting at Cur. /// /// This updates Cur and Column while scanning. /// /// @returns A StringRef starting at Cur which covers the longest contiguous /// sequence of ns-uri-char. StringRef scan_ns_uri_char(); /// @brief Consume a minimal well-formed code unit subsequence starting at /// \a Cur. Return false if it is not the same Unicode scalar value as /// \a Expected. This updates \a Column. bool consume(uint32_t Expected); /// @brief Skip \a Distance UTF-8 code units. Updates \a Cur and \a Column. void skip(uint32_t Distance); /// @brief Return true if the minimal well-formed code unit subsequence at /// Pos is whitespace or a new line bool isBlankOrBreak(StringRef::iterator Position); /// Consume a single b-break[28] if it's present at the current position. /// /// Return false if the code unit at the current position isn't a line break. bool consumeLineBreakIfPresent(); /// @brief If IsSimpleKeyAllowed, create and push_back a new SimpleKey. void saveSimpleKeyCandidate( TokenQueueT::iterator Tok , unsigned AtColumn , bool IsRequired); /// @brief Remove simple keys that can no longer be valid simple keys. /// /// Invalid simple keys are not on the current line or are further than 1024 /// columns back. void removeStaleSimpleKeyCandidates(); /// @brief Remove all simple keys on FlowLevel \a Level. void removeSimpleKeyCandidatesOnFlowLevel(unsigned Level); /// @brief Unroll indentation in \a Indents back to \a Col. Creates BlockEnd /// tokens if needed. bool unrollIndent(int ToColumn); /// @brief Increase indent to \a Col. Creates \a Kind token at \a InsertPoint /// if needed. bool rollIndent( int ToColumn , Token::TokenKind Kind , TokenQueueT::iterator InsertPoint); /// @brief Skip a single-line comment when the comment starts at the current /// position of the scanner. void skipComment(); /// @brief Skip whitespace and comments until the start of the next token. void scanToNextToken(); /// @brief Must be the first token generated. bool scanStreamStart(); /// @brief Generate tokens needed to close out the stream. bool scanStreamEnd(); /// @brief Scan a %BLAH directive. bool scanDirective(); /// @brief Scan a ... or ---. 
bool scanDocumentIndicator(bool IsStart); /// @brief Scan a [ or { and generate the proper flow collection start token. bool scanFlowCollectionStart(bool IsSequence); /// @brief Scan a ] or } and generate the proper flow collection end token. bool scanFlowCollectionEnd(bool IsSequence); /// @brief Scan the , that separates entries in a flow collection. bool scanFlowEntry(); /// @brief Scan the - that starts block sequence entries. bool scanBlockEntry(); /// @brief Scan an explicit ? indicating a key. bool scanKey(); /// @brief Scan an explicit : indicating a value. bool scanValue(); /// @brief Scan a quoted scalar. bool scanFlowScalar(bool IsDoubleQuoted); /// @brief Scan an unquoted scalar. bool scanPlainScalar(); /// @brief Scan an Alias or Anchor starting with * or &. bool scanAliasOrAnchor(bool IsAlias); /// @brief Scan a block scalar starting with | or >. bool scanBlockScalar(bool IsLiteral); /// Scan a chomping indicator in a block scalar header. char scanBlockChompingIndicator(); /// Scan an indentation indicator in a block scalar header. unsigned scanBlockIndentationIndicator(); /// Scan a block scalar header. /// /// Return false if an error occurred. bool scanBlockScalarHeader(char &ChompingIndicator, unsigned &IndentIndicator, bool &IsDone); /// Look for the indentation level of a block scalar. /// /// Return false if an error occurred. bool findBlockScalarIndent(unsigned &BlockIndent, unsigned BlockExitIndent, unsigned &LineBreaks, bool &IsDone); /// Scan the indentation of a text line in a block scalar. /// /// Return false if an error occurred. bool scanBlockScalarIndent(unsigned BlockIndent, unsigned BlockExitIndent, bool &IsDone); /// @brief Scan a tag of the form !stuff. bool scanTag(); /// @brief Dispatch to the next scanning function based on \a *Cur. bool fetchMoreTokens(); /// @brief The SourceMgr used for diagnostics and buffer management. SourceMgr &SM; /// @brief The original input. MemoryBufferRef InputBuffer; /// @brief The current position of the scanner. StringRef::iterator Current; /// @brief The end of the input (one past the last character). StringRef::iterator End; /// @brief Current YAML indentation level in spaces. int Indent; /// @brief Current column number in Unicode code points. unsigned Column; /// @brief Current line number. unsigned Line; /// @brief How deep we are in flow style containers. 0 Means at block level. unsigned FlowLevel; /// @brief Are we at the start of the stream? bool IsStartOfStream; /// @brief Can the next token be the start of a simple key? bool IsSimpleKeyAllowed; /// @brief True if an error has occurred. bool Failed; /// @brief Should colors be used when printing out the diagnostic messages? bool ShowColors; /// @brief Queue of tokens. This is required to queue up tokens while looking /// for the end of a simple key. And for cases where a single character /// can produce multiple tokens (e.g. BlockEnd). TokenQueueT TokenQueue; /// @brief Indentation levels. SmallVector<int, 4> Indents; /// @brief Potential simple keys. SmallVector<SimpleKey, 4> SimpleKeys; }; } // end namespace yaml } // end namespace llvm /// encodeUTF8 - Encode \a UnicodeScalarValue in UTF-8 and append it to result. 
static void encodeUTF8( uint32_t UnicodeScalarValue , SmallVectorImpl<char> &Result) { if (UnicodeScalarValue <= 0x7F) { Result.push_back(UnicodeScalarValue & 0x7F); } else if (UnicodeScalarValue <= 0x7FF) { uint8_t FirstByte = 0xC0 | ((UnicodeScalarValue & 0x7C0) >> 6); uint8_t SecondByte = 0x80 | (UnicodeScalarValue & 0x3F); Result.push_back(FirstByte); Result.push_back(SecondByte); } else if (UnicodeScalarValue <= 0xFFFF) { uint8_t FirstByte = 0xE0 | ((UnicodeScalarValue & 0xF000) >> 12); uint8_t SecondByte = 0x80 | ((UnicodeScalarValue & 0xFC0) >> 6); uint8_t ThirdByte = 0x80 | (UnicodeScalarValue & 0x3F); Result.push_back(FirstByte); Result.push_back(SecondByte); Result.push_back(ThirdByte); } else if (UnicodeScalarValue <= 0x10FFFF) { uint8_t FirstByte = 0xF0 | ((UnicodeScalarValue & 0x1F0000) >> 18); uint8_t SecondByte = 0x80 | ((UnicodeScalarValue & 0x3F000) >> 12); uint8_t ThirdByte = 0x80 | ((UnicodeScalarValue & 0xFC0) >> 6); uint8_t FourthByte = 0x80 | (UnicodeScalarValue & 0x3F); Result.push_back(FirstByte); Result.push_back(SecondByte); Result.push_back(ThirdByte); Result.push_back(FourthByte); } } bool yaml::dumpTokens(StringRef Input, raw_ostream &OS) { SourceMgr SM; Scanner scanner(Input, SM); while (true) { Token T = scanner.getNext(); switch (T.Kind) { case Token::TK_StreamStart: OS << "Stream-Start: "; break; case Token::TK_StreamEnd: OS << "Stream-End: "; break; case Token::TK_VersionDirective: OS << "Version-Directive: "; break; case Token::TK_TagDirective: OS << "Tag-Directive: "; break; case Token::TK_DocumentStart: OS << "Document-Start: "; break; case Token::TK_DocumentEnd: OS << "Document-End: "; break; case Token::TK_BlockEntry: OS << "Block-Entry: "; break; case Token::TK_BlockEnd: OS << "Block-End: "; break; case Token::TK_BlockSequenceStart: OS << "Block-Sequence-Start: "; break; case Token::TK_BlockMappingStart: OS << "Block-Mapping-Start: "; break; case Token::TK_FlowEntry: OS << "Flow-Entry: "; break; case Token::TK_FlowSequenceStart: OS << "Flow-Sequence-Start: "; break; case Token::TK_FlowSequenceEnd: OS << "Flow-Sequence-End: "; break; case Token::TK_FlowMappingStart: OS << "Flow-Mapping-Start: "; break; case Token::TK_FlowMappingEnd: OS << "Flow-Mapping-End: "; break; case Token::TK_Key: OS << "Key: "; break; case Token::TK_Value: OS << "Value: "; break; case Token::TK_Scalar: OS << "Scalar: "; break; case Token::TK_BlockScalar: OS << "Block Scalar: "; break; case Token::TK_Alias: OS << "Alias: "; break; case Token::TK_Anchor: OS << "Anchor: "; break; case Token::TK_Tag: OS << "Tag: "; break; case Token::TK_Error: break; } OS << T.Range << "\n"; if (T.Kind == Token::TK_StreamEnd) break; else if (T.Kind == Token::TK_Error) return false; } return true; } bool yaml::scanTokens(StringRef Input) { llvm::SourceMgr SM; llvm::yaml::Scanner scanner(Input, SM); for (;;) { llvm::yaml::Token T = scanner.getNext(); if (T.Kind == Token::TK_StreamEnd) break; else if (T.Kind == Token::TK_Error) return false; } return true; } std::string yaml::escape(StringRef Input) { std::string EscapedInput; for (StringRef::iterator i = Input.begin(), e = Input.end(); i != e; ++i) { if (*i == '\\') EscapedInput += "\\\\"; else if (*i == '"') EscapedInput += "\\\""; else if (*i == 0) EscapedInput += "\\0"; else if (*i == 0x07) EscapedInput += "\\a"; else if (*i == 0x08) EscapedInput += "\\b"; else if (*i == 0x09) EscapedInput += "\\t"; else if (*i == 0x0A) EscapedInput += "\\n"; else if (*i == 0x0B) EscapedInput += "\\v"; else if (*i == 0x0C) EscapedInput += "\\f"; else if (*i == 
0x0D) EscapedInput += "\\r"; else if (*i == 0x1B) EscapedInput += "\\e"; else if ((unsigned char)*i < 0x20) { // Control characters not handled above. std::string HexStr = utohexstr(*i); EscapedInput += "\\x" + std::string(2 - HexStr.size(), '0') + HexStr; } else if (*i & 0x80) { // UTF-8 multiple code unit subsequence. UTF8Decoded UnicodeScalarValue = decodeUTF8(StringRef(i, Input.end() - i)); if (UnicodeScalarValue.second == 0) { // Found invalid char. SmallString<4> Val; encodeUTF8(0xFFFD, Val); EscapedInput.insert(EscapedInput.end(), Val.begin(), Val.end()); // FIXME: Error reporting. return EscapedInput; } if (UnicodeScalarValue.first == 0x85) EscapedInput += "\\N"; else if (UnicodeScalarValue.first == 0xA0) EscapedInput += "\\_"; else if (UnicodeScalarValue.first == 0x2028) EscapedInput += "\\L"; else if (UnicodeScalarValue.first == 0x2029) EscapedInput += "\\P"; else { std::string HexStr = utohexstr(UnicodeScalarValue.first); if (HexStr.size() <= 2) EscapedInput += "\\x" + std::string(2 - HexStr.size(), '0') + HexStr; else if (HexStr.size() <= 4) EscapedInput += "\\u" + std::string(4 - HexStr.size(), '0') + HexStr; else if (HexStr.size() <= 8) EscapedInput += "\\U" + std::string(8 - HexStr.size(), '0') + HexStr; } i += UnicodeScalarValue.second - 1; } else EscapedInput.push_back(*i); } return EscapedInput; } Scanner::Scanner(StringRef Input, SourceMgr &sm, bool ShowColors) : SM(sm), ShowColors(ShowColors) { init(MemoryBufferRef(Input, "YAML")); } Scanner::Scanner(MemoryBufferRef Buffer, SourceMgr &SM_, bool ShowColors) : SM(SM_), ShowColors(ShowColors) { init(Buffer); } void Scanner::init(MemoryBufferRef Buffer) { InputBuffer = Buffer; Current = InputBuffer.getBufferStart(); End = InputBuffer.getBufferEnd(); Indent = -1; Column = 0; Line = 0; FlowLevel = 0; IsStartOfStream = true; IsSimpleKeyAllowed = true; Failed = false; std::unique_ptr<MemoryBuffer> InputBufferOwner = MemoryBuffer::getMemBuffer(Buffer); SM.AddNewSourceBuffer(std::move(InputBufferOwner), SMLoc()); } Token &Scanner::peekNext() { // If the current token is a possible simple key, keep parsing until we // can confirm. bool NeedMore = false; while (true) { if (TokenQueue.empty() || NeedMore) { if (!fetchMoreTokens()) { TokenQueue.clear(); TokenQueue.push_back(Token()); return TokenQueue.front(); } } assert(!TokenQueue.empty() && "fetchMoreTokens lied about getting tokens!"); removeStaleSimpleKeyCandidates(); SimpleKey SK; SK.Tok = TokenQueue.front(); if (std::find(SimpleKeys.begin(), SimpleKeys.end(), SK) == SimpleKeys.end()) break; else NeedMore = true; } return TokenQueue.front(); } Token Scanner::getNext() { Token Ret = peekNext(); // TokenQueue can be empty if there was an error getting the next token. if (!TokenQueue.empty()) TokenQueue.pop_front(); // There cannot be any referenced Token's if the TokenQueue is empty. So do a // quick deallocation of them all. if (TokenQueue.empty()) { TokenQueue.Alloc.Reset(); } return Ret; } StringRef::iterator Scanner::skip_nb_char(StringRef::iterator Position) { if (Position == End) return Position; // Check 7 bit c-printable - b-char. if ( *Position == 0x09 || (*Position >= 0x20 && *Position <= 0x7E)) return Position + 1; // Check for valid UTF-8. 
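// A multi-byte sequence is an nb-char only if it decodes successfully, is
// not the byte order mark (0xFEFF), and its scalar value falls within one of
// the printable ranges listed in the nb-char[27] production above.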
if (uint8_t(*Position) & 0x80) { UTF8Decoded u8d = decodeUTF8(Position); if ( u8d.second != 0 && u8d.first != 0xFEFF && ( u8d.first == 0x85 || ( u8d.first >= 0xA0 && u8d.first <= 0xD7FF) || ( u8d.first >= 0xE000 && u8d.first <= 0xFFFD) || ( u8d.first >= 0x10000 && u8d.first <= 0x10FFFF))) return Position + u8d.second; } return Position; } StringRef::iterator Scanner::skip_b_break(StringRef::iterator Position) { if (Position == End) return Position; if (*Position == 0x0D) { if (Position + 1 != End && *(Position + 1) == 0x0A) return Position + 2; return Position + 1; } if (*Position == 0x0A) return Position + 1; return Position; } StringRef::iterator Scanner::skip_s_space(StringRef::iterator Position) { if (Position == End) return Position; if (*Position == ' ') return Position + 1; return Position; } StringRef::iterator Scanner::skip_s_white(StringRef::iterator Position) { if (Position == End) return Position; if (*Position == ' ' || *Position == '\t') return Position + 1; return Position; } StringRef::iterator Scanner::skip_ns_char(StringRef::iterator Position) { if (Position == End) return Position; if (*Position == ' ' || *Position == '\t') return Position; return skip_nb_char(Position); } StringRef::iterator Scanner::skip_while( SkipWhileFunc Func , StringRef::iterator Position) { while (true) { StringRef::iterator i = (this->*Func)(Position); if (i == Position) break; Position = i; } return Position; } void Scanner::advanceWhile(SkipWhileFunc Func) { auto Final = skip_while(Func, Current); Column += Final - Current; Current = Final; } static bool is_ns_hex_digit(const char C) { return (C >= '0' && C <= '9') || (C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z'); } static bool is_ns_word_char(const char C) { return C == '-' || (C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z'); } StringRef Scanner::scan_ns_uri_char() { StringRef::iterator Start = Current; while (true) { if (Current == End) break; if (( *Current == '%' && Current + 2 < End && is_ns_hex_digit(*(Current + 1)) && is_ns_hex_digit(*(Current + 2))) || is_ns_word_char(*Current) || StringRef(Current, 1).find_first_of("#;/?:@&=+$,_.!~*'()[]") != StringRef::npos) { ++Current; ++Column; } else break; } return StringRef(Start, Current - Start); } bool Scanner::consume(uint32_t Expected) { if (Expected >= 0x80) report_fatal_error("Not dealing with this yet"); if (Current == End) return false; if (uint8_t(*Current) >= 0x80) report_fatal_error("Not dealing with this yet"); if (uint8_t(*Current) == Expected) { ++Current; ++Column; return true; } return false; } void Scanner::skip(uint32_t Distance) { Current += Distance; Column += Distance; assert(Current <= End && "Skipped past the end"); } bool Scanner::isBlankOrBreak(StringRef::iterator Position) { if (Position == End) return false; if ( *Position == ' ' || *Position == '\t' || *Position == '\r' || *Position == '\n') return true; return false; } bool Scanner::consumeLineBreakIfPresent() { auto Next = skip_b_break(Current); if (Next == Current) return false; Column = 0; ++Line; Current = Next; return true; } void Scanner::saveSimpleKeyCandidate( TokenQueueT::iterator Tok , unsigned AtColumn , bool IsRequired) { if (IsSimpleKeyAllowed) { SimpleKey SK; SK.Tok = Tok; SK.Line = Line; SK.Column = AtColumn; SK.IsRequired = IsRequired; SK.FlowLevel = FlowLevel; SimpleKeys.push_back(SK); } } void Scanner::removeStaleSimpleKeyCandidates() { for (SmallVectorImpl<SimpleKey>::iterator i = SimpleKeys.begin(); i != SimpleKeys.end();) { if (i->Line != Line || i->Column + 1024 < Column) { if 
(i->IsRequired) setError( "Could not find expected : for simple key" , i->Tok->Range.begin()); i = SimpleKeys.erase(i); } else ++i; } } void Scanner::removeSimpleKeyCandidatesOnFlowLevel(unsigned Level) { if (!SimpleKeys.empty() && (SimpleKeys.end() - 1)->FlowLevel == Level) SimpleKeys.pop_back(); } bool Scanner::unrollIndent(int ToColumn) { Token T; // Indentation is ignored in flow. if (FlowLevel != 0) return true; while (Indent > ToColumn) { T.Kind = Token::TK_BlockEnd; T.Range = StringRef(Current, 1); TokenQueue.push_back(T); Indent = Indents.pop_back_val(); } return true; } bool Scanner::rollIndent( int ToColumn , Token::TokenKind Kind , TokenQueueT::iterator InsertPoint) { if (FlowLevel) return true; if (Indent < ToColumn) { Indents.push_back(Indent); Indent = ToColumn; Token T; T.Kind = Kind; T.Range = StringRef(Current, 0); TokenQueue.insert(InsertPoint, T); } return true; } void Scanner::skipComment() { if (*Current != '#') return; while (true) { // This may skip more than one byte, thus Column is only incremented // for code points. StringRef::iterator I = skip_nb_char(Current); if (I == Current) break; Current = I; ++Column; } } void Scanner::scanToNextToken() { while (true) { while (*Current == ' ' || *Current == '\t') { skip(1); } skipComment(); // Skip EOL. StringRef::iterator i = skip_b_break(Current); if (i == Current) break; Current = i; ++Line; Column = 0; // New lines may start a simple key. if (!FlowLevel) IsSimpleKeyAllowed = true; } } bool Scanner::scanStreamStart() { IsStartOfStream = false; EncodingInfo EI = getUnicodeEncoding(currentInput()); Token T; T.Kind = Token::TK_StreamStart; T.Range = StringRef(Current, EI.second); TokenQueue.push_back(T); Current += EI.second; return true; } bool Scanner::scanStreamEnd() { // Force an ending new line if one isn't present. if (Column != 0) { Column = 0; ++Line; } unrollIndent(-1); SimpleKeys.clear(); IsSimpleKeyAllowed = false; Token T; T.Kind = Token::TK_StreamEnd; T.Range = StringRef(Current, 0); TokenQueue.push_back(T); return true; } bool Scanner::scanDirective() { // Reset the indentation level. unrollIndent(-1); SimpleKeys.clear(); IsSimpleKeyAllowed = false; StringRef::iterator Start = Current; consume('%'); StringRef::iterator NameStart = Current; Current = skip_while(&Scanner::skip_ns_char, Current); StringRef Name(NameStart, Current - NameStart); Current = skip_while(&Scanner::skip_s_white, Current); Token T; if (Name == "YAML") { Current = skip_while(&Scanner::skip_ns_char, Current); T.Kind = Token::TK_VersionDirective; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); return true; } else if(Name == "TAG") { Current = skip_while(&Scanner::skip_ns_char, Current); Current = skip_while(&Scanner::skip_s_white, Current); Current = skip_while(&Scanner::skip_ns_char, Current); T.Kind = Token::TK_TagDirective; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); return true; } return false; } bool Scanner::scanDocumentIndicator(bool IsStart) { unrollIndent(-1); SimpleKeys.clear(); IsSimpleKeyAllowed = false; Token T; T.Kind = IsStart ? Token::TK_DocumentStart : Token::TK_DocumentEnd; T.Range = StringRef(Current, 3); skip(3); TokenQueue.push_back(T); return true; } bool Scanner::scanFlowCollectionStart(bool IsSequence) { Token T; T.Kind = IsSequence ? Token::TK_FlowSequenceStart : Token::TK_FlowMappingStart; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); // [ and { may begin a simple key. 
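// For example, in "{a: 1}: v" this '{' is the saved candidate; when the ':'
// after the closing '}' is scanned, a Key token is inserted in front of it,
// making the whole flow mapping the key.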
saveSimpleKeyCandidate(TokenQueue.back(), Column - 1, false); // And may also be followed by a simple key. IsSimpleKeyAllowed = true; ++FlowLevel; return true; } bool Scanner::scanFlowCollectionEnd(bool IsSequence) { removeSimpleKeyCandidatesOnFlowLevel(FlowLevel); IsSimpleKeyAllowed = false; Token T; T.Kind = IsSequence ? Token::TK_FlowSequenceEnd : Token::TK_FlowMappingEnd; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); if (FlowLevel) --FlowLevel; return true; } bool Scanner::scanFlowEntry() { removeSimpleKeyCandidatesOnFlowLevel(FlowLevel); IsSimpleKeyAllowed = true; Token T; T.Kind = Token::TK_FlowEntry; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); return true; } bool Scanner::scanBlockEntry() { rollIndent(Column, Token::TK_BlockSequenceStart, TokenQueue.end()); removeSimpleKeyCandidatesOnFlowLevel(FlowLevel); IsSimpleKeyAllowed = true; Token T; T.Kind = Token::TK_BlockEntry; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); return true; } bool Scanner::scanKey() { if (!FlowLevel) rollIndent(Column, Token::TK_BlockMappingStart, TokenQueue.end()); removeSimpleKeyCandidatesOnFlowLevel(FlowLevel); IsSimpleKeyAllowed = !FlowLevel; Token T; T.Kind = Token::TK_Key; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); return true; } bool Scanner::scanValue() { // If the previous token could have been a simple key, insert the key token // into the token queue. if (!SimpleKeys.empty()) { SimpleKey SK = SimpleKeys.pop_back_val(); Token T; T.Kind = Token::TK_Key; T.Range = SK.Tok->Range; TokenQueueT::iterator i, e; for (i = TokenQueue.begin(), e = TokenQueue.end(); i != e; ++i) { if (i == SK.Tok) break; } assert(i != e && "SimpleKey not in token queue!"); i = TokenQueue.insert(i, T); // We may also need to add a Block-Mapping-Start token. rollIndent(SK.Column, Token::TK_BlockMappingStart, i); IsSimpleKeyAllowed = false; } else { if (!FlowLevel) rollIndent(Column, Token::TK_BlockMappingStart, TokenQueue.end()); IsSimpleKeyAllowed = !FlowLevel; } Token T; T.Kind = Token::TK_Value; T.Range = StringRef(Current, 1); skip(1); TokenQueue.push_back(T); return true; } // Forbidding inlining improves performance by roughly 20%. // FIXME: Remove once llvm optimizes this to the faster version without hints. LLVM_ATTRIBUTE_NOINLINE static bool wasEscaped(StringRef::iterator First, StringRef::iterator Position); // Returns whether a character at 'Position' was escaped with a leading '\'. // 'First' specifies the position of the first character in the string. static bool wasEscaped(StringRef::iterator First, StringRef::iterator Position) { assert(Position - 1 >= First); StringRef::iterator I = Position - 1; // We calculate the number of consecutive '\'s before the current position // by iterating backwards through our string. while (I >= First && *I == '\\') --I; // (Position - 1 - I) now contains the number of '\'s before the current // position. If it is odd, the character at 'Position' was escaped. return (Position - 1 - I) % 2 == 1; } bool Scanner::scanFlowScalar(bool IsDoubleQuoted) { StringRef::iterator Start = Current; unsigned ColStart = Column; if (IsDoubleQuoted) { do { ++Current; while (Current != End && *Current != '"') ++Current; // Repeat until the previous character was not a '\' or was an escaped // backslash. } while ( Current != End && *(Current - 1) == '\\' && wasEscaped(Start + 1, Current)); } else { skip(1); while (true) { // Skip a ' followed by another '. 
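// In single-quoted scalars the only escape is '', which stands for one
// literal single quote; e.g. 'it''s' denotes the string it's.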
if (Current + 1 < End && *Current == '\'' && *(Current + 1) == '\'') { skip(2); continue; } else if (*Current == '\'') break; StringRef::iterator i = skip_nb_char(Current); if (i == Current) { i = skip_b_break(Current); if (i == Current) break; Current = i; Column = 0; ++Line; } else { if (i == End) break; Current = i; ++Column; } } } if (Current == End) { setError("Expected quote at end of scalar", Current); return false; } skip(1); // Skip ending quote. Token T; T.Kind = Token::TK_Scalar; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false); IsSimpleKeyAllowed = false; return true; } bool Scanner::scanPlainScalar() { StringRef::iterator Start = Current; unsigned ColStart = Column; unsigned LeadingBlanks = 0; assert(Indent >= -1 && "Indent must be >= -1 !"); unsigned indent = static_cast<unsigned>(Indent + 1); while (true) { if (*Current == '#') break; while (!isBlankOrBreak(Current)) { if ( FlowLevel && *Current == ':' && !(isBlankOrBreak(Current + 1) || *(Current + 1) == ',')) { setError("Found unexpected ':' while scanning a plain scalar", Current); return false; } // Check for the end of the plain scalar. if ( (*Current == ':' && isBlankOrBreak(Current + 1)) || ( FlowLevel && (StringRef(Current, 1).find_first_of(",:?[]{}") != StringRef::npos))) break; StringRef::iterator i = skip_nb_char(Current); if (i == Current) break; Current = i; ++Column; } // Are we at the end? if (!isBlankOrBreak(Current)) break; // Eat blanks. StringRef::iterator Tmp = Current; while (isBlankOrBreak(Tmp)) { StringRef::iterator i = skip_s_white(Tmp); if (i != Tmp) { if (LeadingBlanks && (Column < indent) && *Tmp == '\t') { setError("Found invalid tab character in indentation", Tmp); return false; } Tmp = i; ++Column; } else { i = skip_b_break(Tmp); if (!LeadingBlanks) LeadingBlanks = 1; Tmp = i; Column = 0; ++Line; } } if (!FlowLevel && Column < indent) break; Current = Tmp; } if (Start == Current) { setError("Got empty plain scalar", Start); return false; } Token T; T.Kind = Token::TK_Scalar; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); // Plain scalars can be simple keys. saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false); IsSimpleKeyAllowed = false; return true; } bool Scanner::scanAliasOrAnchor(bool IsAlias) { StringRef::iterator Start = Current; unsigned ColStart = Column; skip(1); while(true) { if ( *Current == '[' || *Current == ']' || *Current == '{' || *Current == '}' || *Current == ',' || *Current == ':') break; StringRef::iterator i = skip_ns_char(Current); if (i == Current) break; Current = i; ++Column; } if (Start == Current) { setError("Got empty alias or anchor", Start); return false; } Token T; T.Kind = IsAlias ? Token::TK_Alias : Token::TK_Anchor; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); // Alias and anchors can be simple keys. saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false); IsSimpleKeyAllowed = false; return true; } char Scanner::scanBlockChompingIndicator() { char Indicator = ' '; if (Current != End && (*Current == '+' || *Current == '-')) { Indicator = *Current; skip(1); } return Indicator; } /// Get the number of line breaks after chomping. /// /// Return the number of trailing line breaks to emit, depending on /// \p ChompingIndicator. static unsigned getChompedLineBreaks(char ChompingIndicator, unsigned LineBreaks, StringRef Str) { if (ChompingIndicator == '-') // Strip all line breaks. 
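// e.g. a scalar introduced by "|-" ends with no trailing newline at all.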
return 0; if (ChompingIndicator == '+') // Keep all line breaks. return LineBreaks; // Clip trailing lines. return Str.empty() ? 0 : 1; } unsigned Scanner::scanBlockIndentationIndicator() { unsigned Indent = 0; if (Current != End && (*Current >= '1' && *Current <= '9')) { Indent = unsigned(*Current - '0'); skip(1); } return Indent; } bool Scanner::scanBlockScalarHeader(char &ChompingIndicator, unsigned &IndentIndicator, bool &IsDone) { auto Start = Current; ChompingIndicator = scanBlockChompingIndicator(); IndentIndicator = scanBlockIndentationIndicator(); // Check for the chomping indicator once again. if (ChompingIndicator == ' ') ChompingIndicator = scanBlockChompingIndicator(); Current = skip_while(&Scanner::skip_s_white, Current); skipComment(); if (Current == End) { // EOF, we have an empty scalar. Token T; T.Kind = Token::TK_BlockScalar; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); IsDone = true; return true; } if (!consumeLineBreakIfPresent()) { setError("Expected a line break after block scalar header", Current); return false; } return true; } bool Scanner::findBlockScalarIndent(unsigned &BlockIndent, unsigned BlockExitIndent, unsigned &LineBreaks, bool &IsDone) { unsigned MaxAllSpaceLineCharacters = 0; StringRef::iterator LongestAllSpaceLine; while (true) { advanceWhile(&Scanner::skip_s_space); if (skip_nb_char(Current) != Current) { // This line isn't empty, so try and find the indentation. if (Column <= BlockExitIndent) { // End of the block literal. IsDone = true; return true; } // We found the block's indentation. BlockIndent = Column; if (MaxAllSpaceLineCharacters > BlockIndent) { setError( "Leading all-spaces line must be smaller than the block indent", LongestAllSpaceLine); return false; } return true; } if (skip_b_break(Current) != Current && Column > MaxAllSpaceLineCharacters) { // Record the longest all-space line in case it's longer than the // discovered block indent. MaxAllSpaceLineCharacters = Column; LongestAllSpaceLine = Current; } // Check for EOF. if (Current == End) { IsDone = true; return true; } if (!consumeLineBreakIfPresent()) { IsDone = true; return true; } ++LineBreaks; } return true; } bool Scanner::scanBlockScalarIndent(unsigned BlockIndent, unsigned BlockExitIndent, bool &IsDone) { // Skip the indentation. while (Column < BlockIndent) { auto I = skip_s_space(Current); if (I == Current) break; Current = I; ++Column; } if (skip_nb_char(Current) == Current) return true; if (Column <= BlockExitIndent) { // End of the block literal. IsDone = true; return true; } if (Column < BlockIndent) { if (Current != End && *Current == '#') { // Trailing comment. IsDone = true; return true; } setError("A text line is less indented than the block scalar", Current); return false; } return true; // A normal text line. } bool Scanner::scanBlockScalar(bool IsLiteral) { // Eat '|' or '>' assert(*Current == '|' || *Current == '>'); skip(1); char ChompingIndicator; unsigned BlockIndent; bool IsDone = false; if (!scanBlockScalarHeader(ChompingIndicator, BlockIndent, IsDone)) return false; if (IsDone) return true; auto Start = Current; unsigned BlockExitIndent = Indent < 0 ? 0 : (unsigned)Indent; unsigned LineBreaks = 0; if (BlockIndent == 0) { if (!findBlockScalarIndent(BlockIndent, BlockExitIndent, LineBreaks, IsDone)) return false; } // Scan the block's scalars body. SmallString<256> Str; while (!IsDone) { if (!scanBlockScalarIndent(BlockIndent, BlockExitIndent, IsDone)) return false; if (IsDone) break; // Parse the current line. 
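// Everything from the block indent up to the line break belongs to the
// scalar's value; any pending line breaks are flushed in front of it.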
auto LineStart = Current; advanceWhile(&Scanner::skip_nb_char); if (LineStart != Current) { Str.append(LineBreaks, '\n'); Str.append(StringRef(LineStart, Current - LineStart)); LineBreaks = 0; } // Check for EOF. if (Current == End) break; if (!consumeLineBreakIfPresent()) break; ++LineBreaks; } if (Current == End && !LineBreaks) // Ensure that there is at least one line break before the end of file. LineBreaks = 1; Str.append(getChompedLineBreaks(ChompingIndicator, LineBreaks, Str), '\n'); // New lines may start a simple key. if (!FlowLevel) IsSimpleKeyAllowed = true; Token T; T.Kind = Token::TK_BlockScalar; T.Range = StringRef(Start, Current - Start); T.Value = Str.str().str(); TokenQueue.push_back(T); return true; } bool Scanner::scanTag() { StringRef::iterator Start = Current; unsigned ColStart = Column; skip(1); // Eat !. if (Current == End || isBlankOrBreak(Current)); // An empty tag. else if (*Current == '<') { skip(1); scan_ns_uri_char(); if (!consume('>')) return false; } else { // FIXME: Actually parse the c-ns-shorthand-tag rule. Current = skip_while(&Scanner::skip_ns_char, Current); } Token T; T.Kind = Token::TK_Tag; T.Range = StringRef(Start, Current - Start); TokenQueue.push_back(T); // Tags can be simple keys. saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false); IsSimpleKeyAllowed = false; return true; } bool Scanner::fetchMoreTokens() { if (IsStartOfStream) return scanStreamStart(); scanToNextToken(); if (Current == End) return scanStreamEnd(); removeStaleSimpleKeyCandidates(); unrollIndent(Column); if (Column == 0 && *Current == '%') return scanDirective(); if (Column == 0 && Current + 4 <= End && *Current == '-' && *(Current + 1) == '-' && *(Current + 2) == '-' && (Current + 3 == End || isBlankOrBreak(Current + 3))) return scanDocumentIndicator(true); if (Column == 0 && Current + 4 <= End && *Current == '.' && *(Current + 1) == '.' && *(Current + 2) == '.' && (Current + 3 == End || isBlankOrBreak(Current + 3))) return scanDocumentIndicator(false); if (*Current == '[') return scanFlowCollectionStart(true); if (*Current == '{') return scanFlowCollectionStart(false); if (*Current == ']') return scanFlowCollectionEnd(true); if (*Current == '}') return scanFlowCollectionEnd(false); if (*Current == ',') return scanFlowEntry(); if (*Current == '-' && isBlankOrBreak(Current + 1)) return scanBlockEntry(); if (*Current == '?' && (FlowLevel || isBlankOrBreak(Current + 1))) return scanKey(); if (*Current == ':' && (FlowLevel || isBlankOrBreak(Current + 1))) return scanValue(); if (*Current == '*') return scanAliasOrAnchor(true); if (*Current == '&') return scanAliasOrAnchor(false); if (*Current == '!') return scanTag(); if (*Current == '|' && !FlowLevel) return scanBlockScalar(true); if (*Current == '>' && !FlowLevel) return scanBlockScalar(false); if (*Current == '\'') return scanFlowScalar(false); if (*Current == '"') return scanFlowScalar(true); // Get a plain scalar. StringRef FirstChar(Current, 1); if (!(isBlankOrBreak(Current) || FirstChar.find_first_of("-?:,[]{}#&*!|>'\"%@`") != StringRef::npos) || (*Current == '-' && !isBlankOrBreak(Current + 1)) || (!FlowLevel && (*Current == '?' 
|| *Current == ':') && isBlankOrBreak(Current + 1)) || (!FlowLevel && *Current == ':' && Current + 2 < End && *(Current + 1) == ':' && !isBlankOrBreak(Current + 2))) return scanPlainScalar(); setError("Unrecognized character while tokenizing."); return false; } Stream::Stream(StringRef Input, SourceMgr &SM, bool ShowColors) : scanner(new Scanner(Input, SM, ShowColors)), CurrentDoc() {} Stream::Stream(MemoryBufferRef InputBuffer, SourceMgr &SM, bool ShowColors) : scanner(new Scanner(InputBuffer, SM, ShowColors)), CurrentDoc() {} Stream::~Stream() {} bool Stream::failed() { return scanner->failed(); } void Stream::printError(Node *N, const Twine &Msg) { scanner->printError( N->getSourceRange().Start , SourceMgr::DK_Error , Msg , N->getSourceRange()); } document_iterator Stream::begin() { if (CurrentDoc) report_fatal_error("Can only iterate over the stream once"); // Skip Stream-Start. scanner->getNext(); CurrentDoc.reset(new Document(*this)); return document_iterator(CurrentDoc); } document_iterator Stream::end() { return document_iterator(); } void Stream::skip() { for (document_iterator i = begin(), e = end(); i != e; ++i) i->skip(); } Node::Node(unsigned int Type, std::unique_ptr<Document> &D, StringRef A, StringRef T) : Doc(D), TypeID(Type), Anchor(A), Tag(T) { SMLoc Start = SMLoc::getFromPointer(peekNext().Range.begin()); SourceRange = SMRange(Start, Start); } std::string Node::getVerbatimTag() const { StringRef Raw = getRawTag(); if (!Raw.empty() && Raw != "!") { std::string Ret; if (Raw.find_last_of('!') == 0) { Ret = Doc->getTagMap().find("!")->second; Ret += Raw.substr(1); return Ret; } else if (Raw.startswith("!!")) { Ret = Doc->getTagMap().find("!!")->second; Ret += Raw.substr(2); return Ret; } else { StringRef TagHandle = Raw.substr(0, Raw.find_last_of('!') + 1); std::map<StringRef, StringRef>::const_iterator It = Doc->getTagMap().find(TagHandle); if (It != Doc->getTagMap().end()) Ret = It->second; else { Token T; T.Kind = Token::TK_Tag; T.Range = TagHandle; setError(Twine("Unknown tag handle ") + TagHandle, T); } Ret += Raw.substr(Raw.find_last_of('!') + 1); return Ret; } } switch (getType()) { case NK_Null: return "tag:yaml.org,2002:null"; case NK_Scalar: case NK_BlockScalar: // TODO: Tag resolution. return "tag:yaml.org,2002:str"; case NK_Mapping: return "tag:yaml.org,2002:map"; case NK_Sequence: return "tag:yaml.org,2002:seq"; } return ""; } Token &Node::peekNext() { return Doc->peekNext(); } Token Node::getNext() { return Doc->getNext(); } Node *Node::parseBlockNode() { return Doc->parseBlockNode(); } BumpPtrAllocator &Node::getAllocator() { return Doc->NodeAllocator; } void Node::setError(const Twine &Msg, Token &Tok) const { Doc->setError(Msg, Tok); } bool Node::failed() const { return Doc->failed(); } StringRef ScalarNode::getValue(SmallVectorImpl<char> &Storage) const { // TODO: Handle newlines properly. We need to remove leading whitespace. if (Value[0] == '"') { // Double quoted. // Pull off the leading and trailing "s. StringRef UnquotedValue = Value.substr(1, Value.size() - 2); // Search for characters that would require unescaping the value. StringRef::size_type i = UnquotedValue.find_first_of("\\\r\n"); if (i != StringRef::npos) return unescapeDoubleQuoted(UnquotedValue, i, Storage); return UnquotedValue; } else if (Value[0] == '\'') { // Single quoted. // Pull off the leading and trailing 's. StringRef UnquotedValue = Value.substr(1, Value.size() - 2); StringRef::size_type i = UnquotedValue.find('\''); if (i != StringRef::npos) { // We're going to need Storage. 
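// Collapse each '' escape to a single ' while copying into Storage,
// e.g. the unquoted value it''s becomes it's.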
Storage.clear(); Storage.reserve(UnquotedValue.size()); for (; i != StringRef::npos; i = UnquotedValue.find('\'')) { StringRef Valid(UnquotedValue.begin(), i); Storage.insert(Storage.end(), Valid.begin(), Valid.end()); Storage.push_back('\''); UnquotedValue = UnquotedValue.substr(i + 2); } Storage.insert(Storage.end(), UnquotedValue.begin(), UnquotedValue.end()); return StringRef(Storage.begin(), Storage.size()); } return UnquotedValue; } // Plain or block. return Value.rtrim(" "); } StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue , StringRef::size_type i , SmallVectorImpl<char> &Storage) const { // Use Storage to build proper value. Storage.clear(); Storage.reserve(UnquotedValue.size()); for (; i != StringRef::npos; i = UnquotedValue.find_first_of("\\\r\n")) { // Insert all previous chars into Storage. StringRef Valid(UnquotedValue.begin(), i); Storage.insert(Storage.end(), Valid.begin(), Valid.end()); // Chop off inserted chars. UnquotedValue = UnquotedValue.substr(i); assert(!UnquotedValue.empty() && "Can't be empty!"); // Parse escape or line break. switch (UnquotedValue[0]) { case '\r': case '\n': Storage.push_back('\n'); if ( UnquotedValue.size() > 1 && (UnquotedValue[1] == '\r' || UnquotedValue[1] == '\n')) UnquotedValue = UnquotedValue.substr(1); UnquotedValue = UnquotedValue.substr(1); break; default: if (UnquotedValue.size() == 1) // TODO: Report error. break; UnquotedValue = UnquotedValue.substr(1); switch (UnquotedValue[0]) { default: { Token T; T.Range = StringRef(UnquotedValue.begin(), 1); setError("Unrecognized escape code!", T); return ""; } case '\r': case '\n': // Remove the new line. if ( UnquotedValue.size() > 1 && (UnquotedValue[1] == '\r' || UnquotedValue[1] == '\n')) UnquotedValue = UnquotedValue.substr(1); // If this was just a single byte newline, it will get skipped // below. break; case '0': Storage.push_back(0x00); break; case 'a': Storage.push_back(0x07); break; case 'b': Storage.push_back(0x08); break; case 't': case 0x09: Storage.push_back(0x09); break; case 'n': Storage.push_back(0x0A); break; case 'v': Storage.push_back(0x0B); break; case 'f': Storage.push_back(0x0C); break; case 'r': Storage.push_back(0x0D); break; case 'e': Storage.push_back(0x1B); break; case ' ': Storage.push_back(0x20); break; case '"': Storage.push_back(0x22); break; case '/': Storage.push_back(0x2F); break; case '\\': Storage.push_back(0x5C); break; case 'N': encodeUTF8(0x85, Storage); break; case '_': encodeUTF8(0xA0, Storage); break; case 'L': encodeUTF8(0x2028, Storage); break; case 'P': encodeUTF8(0x2029, Storage); break; case 'x': { if (UnquotedValue.size() < 3) // TODO: Report error. break; unsigned int UnicodeScalarValue; if (UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue)) // TODO: Report error. UnicodeScalarValue = 0xFFFD; encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(2); break; } case 'u': { if (UnquotedValue.size() < 5) // TODO: Report error. break; unsigned int UnicodeScalarValue; if (UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue)) // TODO: Report error. UnicodeScalarValue = 0xFFFD; encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(4); break; } case 'U': { if (UnquotedValue.size() < 9) // TODO: Report error. break; unsigned int UnicodeScalarValue; if (UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue)) // TODO: Report error. 
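// Fall back to U+FFFD REPLACEMENT CHARACTER when the hex digits do not
// parse as an integer.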
UnicodeScalarValue = 0xFFFD; encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(8); break; } } UnquotedValue = UnquotedValue.substr(1); } } Storage.insert(Storage.end(), UnquotedValue.begin(), UnquotedValue.end()); return StringRef(Storage.begin(), Storage.size()); } Node *KeyValueNode::getKey() { if (Key) return Key; // Handle implicit null keys. { Token &t = peekNext(); if ( t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_Value || t.Kind == Token::TK_Error) { return Key = new (getAllocator()) NullNode(Doc); } if (t.Kind == Token::TK_Key) getNext(); // skip TK_Key. } // Handle explicit null keys. Token &t = peekNext(); if (t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_Value) { return Key = new (getAllocator()) NullNode(Doc); } // We've got a normal key. return Key = parseBlockNode(); } Node *KeyValueNode::getValue() { if (Value) return Value; getKey()->skip(); if (failed()) return Value = new (getAllocator()) NullNode(Doc); // Handle implicit null values. { Token &t = peekNext(); if ( t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_FlowMappingEnd || t.Kind == Token::TK_Key || t.Kind == Token::TK_FlowEntry || t.Kind == Token::TK_Error) { return Value = new (getAllocator()) NullNode(Doc); } if (t.Kind != Token::TK_Value) { setError("Unexpected token in Key Value.", t); return Value = new (getAllocator()) NullNode(Doc); } getNext(); // skip TK_Value. } // Handle explicit null values. Token &t = peekNext(); if (t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_Key) { return Value = new (getAllocator()) NullNode(Doc); } // We got a normal value. return Value = parseBlockNode(); } void MappingNode::increment() { if (failed()) { IsAtEnd = true; CurrentEntry = nullptr; return; } if (CurrentEntry) { CurrentEntry->skip(); if (Type == MT_Inline) { IsAtEnd = true; CurrentEntry = nullptr; return; } } Token T = peekNext(); if (T.Kind == Token::TK_Key || T.Kind == Token::TK_Scalar) { // KeyValueNode eats the TK_Key. That way it can detect null keys. CurrentEntry = new (getAllocator()) KeyValueNode(Doc); } else if (Type == MT_Block) { switch (T.Kind) { case Token::TK_BlockEnd: getNext(); IsAtEnd = true; CurrentEntry = nullptr; break; default: setError("Unexpected token. Expected Key or Block End", T); LLVM_FALLTHROUGH; // HLSL Change case Token::TK_Error: IsAtEnd = true; CurrentEntry = nullptr; } } else { switch (T.Kind) { case Token::TK_FlowEntry: // Eat the flow entry and recurse. getNext(); return increment(); case Token::TK_FlowMappingEnd: getNext(); LLVM_FALLTHROUGH; // HLSL Change case Token::TK_Error: // Set this to end iterator. IsAtEnd = true; CurrentEntry = nullptr; break; default: setError( "Unexpected token. Expected Key, Flow Entry, or Flow " "Mapping End." , T); IsAtEnd = true; CurrentEntry = nullptr; } } } void SequenceNode::increment() { if (failed()) { IsAtEnd = true; CurrentEntry = nullptr; return; } if (CurrentEntry) CurrentEntry->skip(); Token T = peekNext(); if (SeqType == ST_Block) { switch (T.Kind) { case Token::TK_BlockEntry: getNext(); CurrentEntry = parseBlockNode(); if (!CurrentEntry) { // An error occurred. IsAtEnd = true; CurrentEntry = nullptr; } break; case Token::TK_BlockEnd: getNext(); IsAtEnd = true; CurrentEntry = nullptr; break; default: setError( "Unexpected token. Expected Block Entry or Block End." 
, T); LLVM_FALLTHROUGH; // HLSL Change case Token::TK_Error: IsAtEnd = true; CurrentEntry = nullptr; } } else if (SeqType == ST_Indentless) { switch (T.Kind) { case Token::TK_BlockEntry: getNext(); CurrentEntry = parseBlockNode(); if (!CurrentEntry) { // An error occurred. IsAtEnd = true; CurrentEntry = nullptr; } break; default: case Token::TK_Error: IsAtEnd = true; CurrentEntry = nullptr; } } else if (SeqType == ST_Flow) { switch (T.Kind) { case Token::TK_FlowEntry: // Eat the flow entry and recurse. getNext(); WasPreviousTokenFlowEntry = true; return increment(); case Token::TK_FlowSequenceEnd: getNext(); LLVM_FALLTHROUGH; // HLSL Change case Token::TK_Error: // Set this to end iterator. IsAtEnd = true; CurrentEntry = nullptr; break; case Token::TK_StreamEnd: case Token::TK_DocumentEnd: case Token::TK_DocumentStart: setError("Could not find closing ]!", T); // Set this to end iterator. IsAtEnd = true; CurrentEntry = nullptr; break; default: if (!WasPreviousTokenFlowEntry) { setError("Expected , between entries!", T); IsAtEnd = true; CurrentEntry = nullptr; break; } // Otherwise it must be a flow entry. CurrentEntry = parseBlockNode(); if (!CurrentEntry) { IsAtEnd = true; } WasPreviousTokenFlowEntry = false; break; } } } Document::Document(Stream &S) : stream(S), Root(nullptr) { // Tag maps starts with two default mappings. TagMap["!"] = "!"; TagMap["!!"] = "tag:yaml.org,2002:"; if (parseDirectives()) expectToken(Token::TK_DocumentStart); Token &T = peekNext(); if (T.Kind == Token::TK_DocumentStart) getNext(); } bool Document::skip() { if (stream.scanner->failed()) return false; if (!Root) getRoot(); Root->skip(); Token &T = peekNext(); if (T.Kind == Token::TK_StreamEnd) return false; if (T.Kind == Token::TK_DocumentEnd) { getNext(); return skip(); } return true; } Token &Document::peekNext() { return stream.scanner->peekNext(); } Token Document::getNext() { return stream.scanner->getNext(); } void Document::setError(const Twine &Message, Token &Location) const { stream.scanner->setError(Message, Location.Range.begin()); } bool Document::failed() const { return stream.scanner->failed(); } Node *Document::parseBlockNode() { Token T = peekNext(); // Handle properties. Token AnchorInfo; Token TagInfo; parse_property: switch (T.Kind) { case Token::TK_Alias: getNext(); return new (NodeAllocator) AliasNode(stream.CurrentDoc, T.Range.substr(1)); case Token::TK_Anchor: if (AnchorInfo.Kind == Token::TK_Anchor) { setError("Already encountered an anchor for this node!", T); return nullptr; } AnchorInfo = getNext(); // Consume TK_Anchor. T = peekNext(); goto parse_property; case Token::TK_Tag: if (TagInfo.Kind == Token::TK_Tag) { setError("Already encountered a tag for this node!", T); return nullptr; } TagInfo = getNext(); // Consume TK_Tag. T = peekNext(); goto parse_property; default: break; } switch (T.Kind) { case Token::TK_BlockEntry: // We got an unindented BlockEntry sequence. This is not terminated with // a BlockEnd. // Don't eat the TK_BlockEntry, SequenceNode needs it. 
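// Indentless sequences typically appear as the value of a block mapping
// entry, e.g.:
//   key:
//   - a
//   - b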
return new (NodeAllocator) SequenceNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , SequenceNode::ST_Indentless); case Token::TK_BlockSequenceStart: getNext(); return new (NodeAllocator) SequenceNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , SequenceNode::ST_Block); case Token::TK_BlockMappingStart: getNext(); return new (NodeAllocator) MappingNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , MappingNode::MT_Block); case Token::TK_FlowSequenceStart: getNext(); return new (NodeAllocator) SequenceNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , SequenceNode::ST_Flow); case Token::TK_FlowMappingStart: getNext(); return new (NodeAllocator) MappingNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , MappingNode::MT_Flow); case Token::TK_Scalar: getNext(); return new (NodeAllocator) ScalarNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , T.Range); case Token::TK_BlockScalar: { getNext(); StringRef NullTerminatedStr(T.Value.c_str(), T.Value.length() + 1); StringRef StrCopy = NullTerminatedStr.copy(NodeAllocator).drop_back(); return new (NodeAllocator) BlockScalarNode(stream.CurrentDoc, AnchorInfo.Range.substr(1), TagInfo.Range, StrCopy, T.Range); } case Token::TK_Key: // Don't eat the TK_Key, KeyValueNode expects it. return new (NodeAllocator) MappingNode( stream.CurrentDoc , AnchorInfo.Range.substr(1) , TagInfo.Range , MappingNode::MT_Inline); case Token::TK_DocumentStart: case Token::TK_DocumentEnd: case Token::TK_StreamEnd: default: // TODO: Properly handle tags. "[!!str ]" should resolve to !!str "", not // !!null null. return new (NodeAllocator) NullNode(stream.CurrentDoc); case Token::TK_Error: return nullptr; } llvm_unreachable("Control flow shouldn't reach here."); return nullptr; } bool Document::parseDirectives() { bool isDirective = false; while (true) { Token T = peekNext(); if (T.Kind == Token::TK_TagDirective) { parseTAGDirective(); isDirective = true; } else if (T.Kind == Token::TK_VersionDirective) { parseYAMLDirective(); isDirective = true; } else break; } return isDirective; } void Document::parseYAMLDirective() { getNext(); // Eat %YAML <version> } void Document::parseTAGDirective() { Token Tag = getNext(); // %TAG <handle> <prefix> StringRef T = Tag.Range; // Strip %TAG T = T.substr(T.find_first_of(" \t")).ltrim(" \t"); std::size_t HandleEnd = T.find_first_of(" \t"); StringRef TagHandle = T.substr(0, HandleEnd); StringRef TagPrefix = T.substr(HandleEnd).ltrim(" \t"); TagMap[TagHandle] = TagPrefix; } bool Document::expectToken(int TK) { Token T = getNext(); if (T.Kind != TK) { setError("Unexpected token", T); return false; } return true; }
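// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): a minimal
// driver for the two public helpers defined above, yaml::scanTokens and
// yaml::dumpTokens. The YAML input string is an arbitrary example.
//
//   #include "llvm/Support/YAMLParser.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   int main() {
//     llvm::StringRef Input = "key: [1, 2, 3]\n";
//     if (!llvm::yaml::scanTokens(Input))           // cheap validity check
//       return 1;
//     llvm::yaml::dumpTokens(Input, llvm::outs());  // one line per token
//     return 0;
//   }
// ---------------------------------------------------------------------------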
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Triple.cpp
//===--- Triple.cpp - Target triple helper class --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/ADT/Triple.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/TargetParser.h" #include "llvm/Support/Host.h" #include <cstring> using namespace llvm; const char *Triple::getArchTypeName(ArchType Kind) { switch (Kind) { case UnknownArch: return "unknown"; case aarch64: return "aarch64"; case aarch64_be: return "aarch64_be"; case arm: return "arm"; case armeb: return "armeb"; case bpfel: return "bpfel"; case bpfeb: return "bpfeb"; case hexagon: return "hexagon"; case mips: return "mips"; case mipsel: return "mipsel"; case mips64: return "mips64"; case mips64el: return "mips64el"; case msp430: return "msp430"; case ppc64: return "powerpc64"; case ppc64le: return "powerpc64le"; case ppc: return "powerpc"; case r600: return "r600"; case amdgcn: return "amdgcn"; case sparc: return "sparc"; case sparcv9: return "sparcv9"; case sparcel: return "sparcel"; case systemz: return "s390x"; case tce: return "tce"; case thumb: return "thumb"; case thumbeb: return "thumbeb"; case x86: return "i386"; case x86_64: return "x86_64"; case xcore: return "xcore"; case nvptx: return "nvptx"; case nvptx64: return "nvptx64"; case le32: return "le32"; case le64: return "le64"; case amdil: return "amdil"; case amdil64: return "amdil64"; case hsail: return "hsail"; case hsail64: return "hsail64"; case spir: return "spir"; case spir64: return "spir64"; // HLSL Change Begins case dxil: return "dxil"; case dxil64: return "dxil64"; // HLSL Change Ends case kalimba: return "kalimba"; case shave: return "shave"; case wasm32: return "wasm32"; case wasm64: return "wasm64"; } llvm_unreachable("Invalid ArchType!"); } const char *Triple::getArchTypePrefix(ArchType Kind) { switch (Kind) { default: return nullptr; case aarch64: case aarch64_be: return "aarch64"; case arm: case armeb: case thumb: case thumbeb: return "arm"; case ppc64: case ppc64le: case ppc: return "ppc"; case mips: case mipsel: case mips64: case mips64el: return "mips"; case hexagon: return "hexagon"; case amdgcn: case r600: return "amdgpu"; case bpfel: case bpfeb: return "bpf"; case sparcv9: case sparcel: case sparc: return "sparc"; case systemz: return "s390"; case x86: case x86_64: return "x86"; case xcore: return "xcore"; case nvptx: return "nvptx"; case nvptx64: return "nvptx"; case le32: return "le32"; case le64: return "le64"; case amdil: case amdil64: return "amdil"; case hsail: case hsail64: return "hsail"; case spir: case spir64: return "spir"; // HLSL Change Begins case dxil: case dxil64: return "dxil"; // HLSL Change Ends case kalimba: return "kalimba"; case shave: return "shave"; case wasm32: return "wasm32"; case wasm64: return "wasm64"; } } const char *Triple::getVendorTypeName(VendorType Kind) { switch (Kind) { case UnknownVendor: return "unknown"; case Apple: return "apple"; case PC: return "pc"; case SCEI: return "scei"; case BGP: return "bgp"; case BGQ: return "bgq"; case Freescale: return "fsl"; case IBM: return "ibm"; case ImaginationTechnologies: return "img"; case MipsTechnologies: return "mti"; case Microsoft: return "ms"; // HLSL Change case NVIDIA: return "nvidia"; case CSR: return "csr"; } 
llvm_unreachable("Invalid VendorType!"); } const char *Triple::getOSTypeName(OSType Kind) { switch (Kind) { case UnknownOS: return "unknown"; case CloudABI: return "cloudabi"; case Darwin: return "darwin"; case DragonFly: return "dragonfly"; case FreeBSD: return "freebsd"; case IOS: return "ios"; case KFreeBSD: return "kfreebsd"; case Linux: return "linux"; case Lv2: return "lv2"; case MacOSX: return "macosx"; case NetBSD: return "netbsd"; case OpenBSD: return "openbsd"; case Solaris: return "solaris"; case Win32: return "windows"; case Haiku: return "haiku"; case Minix: return "minix"; case RTEMS: return "rtems"; case NaCl: return "nacl"; case CNK: return "cnk"; case Bitrig: return "bitrig"; case AIX: return "aix"; case CUDA: return "cuda"; case NVCL: return "nvcl"; case AMDHSA: return "amdhsa"; case DirectX: return "dx"; // HLSL Change case PS4: return "ps4"; } llvm_unreachable("Invalid OSType"); } const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) { switch (Kind) { case UnknownEnvironment: return "unknown"; case GNU: return "gnu"; case GNUEABIHF: return "gnueabihf"; case GNUEABI: return "gnueabi"; case GNUX32: return "gnux32"; case CODE16: return "code16"; case EABI: return "eabi"; case EABIHF: return "eabihf"; case Android: return "android"; case MSVC: return "msvc"; case Itanium: return "itanium"; case Cygnus: return "cygnus"; } llvm_unreachable("Invalid EnvironmentType!"); } static Triple::ArchType parseBPFArch(StringRef ArchName) { if (ArchName.equals("bpf")) { if (sys::IsLittleEndianHost) return Triple::bpfel; else return Triple::bpfeb; } else if (ArchName.equals("bpf_be") || ArchName.equals("bpfeb")) { return Triple::bpfeb; } else if (ArchName.equals("bpf_le") || ArchName.equals("bpfel")) { return Triple::bpfel; } else { return Triple::UnknownArch; } } Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) { Triple::ArchType BPFArch(parseBPFArch(Name)); return StringSwitch<Triple::ArchType>(Name) .Case("aarch64", aarch64) .Case("aarch64_be", aarch64_be) .Case("arm64", aarch64) // "arm64" is an alias for "aarch64" .Case("arm", arm) .Case("armeb", armeb) .StartsWith("bpf", BPFArch) .Case("mips", mips) .Case("mipsel", mipsel) .Case("mips64", mips64) .Case("mips64el", mips64el) .Case("msp430", msp430) .Case("ppc64", ppc64) .Case("ppc32", ppc) .Case("ppc", ppc) .Case("ppc64le", ppc64le) .Case("r600", r600) .Case("amdgcn", amdgcn) .Case("hexagon", hexagon) .Case("sparc", sparc) .Case("sparcel", sparcel) .Case("sparcv9", sparcv9) .Case("systemz", systemz) .Case("tce", tce) .Case("thumb", thumb) .Case("thumbeb", thumbeb) .Case("x86", x86) .Case("x86-64", x86_64) .Case("xcore", xcore) .Case("nvptx", nvptx) .Case("nvptx64", nvptx64) .Case("le32", le32) .Case("le64", le64) .Case("amdil", amdil) .Case("amdil64", amdil64) .Case("hsail", hsail) .Case("hsail64", hsail64) .Case("spir", spir) .Case("spir64", spir64) // HLSL Change Begins .Case("dxil", dxil) .Case("dxil64", dxil64) // HLSL Change Ends .Case("kalimba", kalimba) .Case("shave", shave) .Case("wasm32", wasm32) .Case("wasm64", wasm64) .Default(UnknownArch); } static Triple::ArchType parseARMArch(StringRef ArchName) { unsigned ISA = ARMTargetParser::parseArchISA(ArchName); unsigned ENDIAN = ARMTargetParser::parseArchEndian(ArchName); Triple::ArchType arch = Triple::UnknownArch; switch (ENDIAN) { case ARM::EK_LITTLE: { switch (ISA) { case ARM::IK_ARM: arch = Triple::arm; break; case ARM::IK_THUMB: arch = Triple::thumb; break; case ARM::IK_AARCH64: arch = Triple::aarch64; break; } break; } case ARM::EK_BIG: { switch 
(ISA) { case ARM::IK_ARM: arch = Triple::armeb; break; case ARM::IK_THUMB: arch = Triple::thumbeb; break; case ARM::IK_AARCH64: arch = Triple::aarch64_be; break; } break; } } ArchName = ARMTargetParser::getCanonicalArchName(ArchName); if (ArchName.empty()) return Triple::UnknownArch; // Thumb only exists in v4+ if (ISA == ARM::IK_THUMB && (ArchName.startswith("v2") || ArchName.startswith("v3"))) return Triple::UnknownArch; // Thumb only for v6m unsigned Profile = ARMTargetParser::parseArchProfile(ArchName); unsigned Version = ARMTargetParser::parseArchVersion(ArchName); if (Profile == ARM::PK_M && Version == 6) { if (ENDIAN == ARM::EK_BIG) return Triple::thumbeb; else return Triple::thumb; } return arch; } static Triple::ArchType parseArch(StringRef ArchName) { Triple::ArchType ARMArch(parseARMArch(ArchName)); Triple::ArchType BPFArch(parseBPFArch(ArchName)); return StringSwitch<Triple::ArchType>(ArchName) .Cases("i386", "i486", "i586", "i686", Triple::x86) // FIXME: Do we need to support these? .Cases("i786", "i886", "i986", Triple::x86) .Cases("amd64", "x86_64", "x86_64h", Triple::x86_64) .Case("powerpc", Triple::ppc) .Cases("powerpc64", "ppu", Triple::ppc64) .Case("powerpc64le", Triple::ppc64le) .Case("xscale", Triple::arm) .Case("xscaleeb", Triple::armeb) .StartsWith("arm", ARMArch) .StartsWith("thumb", ARMArch) .StartsWith("aarch64", ARMArch) .Case("msp430", Triple::msp430) .Cases("mips", "mipseb", "mipsallegrex", Triple::mips) .Cases("mipsel", "mipsallegrexel", Triple::mipsel) .Cases("mips64", "mips64eb", Triple::mips64) .Case("mips64el", Triple::mips64el) .Case("r600", Triple::r600) .Case("amdgcn", Triple::amdgcn) .StartsWith("bpf", BPFArch) .Case("hexagon", Triple::hexagon) .Case("s390x", Triple::systemz) .Case("sparc", Triple::sparc) .Case("sparcel", Triple::sparcel) .Cases("sparcv9", "sparc64", Triple::sparcv9) .Case("tce", Triple::tce) .Case("xcore", Triple::xcore) .Case("nvptx", Triple::nvptx) .Case("nvptx64", Triple::nvptx64) .Case("le32", Triple::le32) .Case("le64", Triple::le64) .Case("amdil", Triple::amdil) .Case("amdil64", Triple::amdil64) .Case("hsail", Triple::hsail) .Case("hsail64", Triple::hsail64) .Case("spir", Triple::spir) .Case("spir64", Triple::spir64) // HLSL Change Begins .Case("dxil", Triple::dxil) .Case("dxil64", Triple::dxil64) // HLSL Change Ends .StartsWith("kalimba", Triple::kalimba) .Case("shave", Triple::shave) .Case("wasm32", Triple::wasm32) .Case("wasm64", Triple::wasm64) .Default(Triple::UnknownArch); } static Triple::VendorType parseVendor(StringRef VendorName) { return StringSwitch<Triple::VendorType>(VendorName) .Case("apple", Triple::Apple) .Case("pc", Triple::PC) .Case("scei", Triple::SCEI) .Case("bgp", Triple::BGP) .Case("bgq", Triple::BGQ) .Case("fsl", Triple::Freescale) .Case("ibm", Triple::IBM) .Case("img", Triple::ImaginationTechnologies) .Case("mti", Triple::MipsTechnologies) .Case("ms", Triple::Microsoft) // HLSL Change .Case("nvidia", Triple::NVIDIA) .Case("csr", Triple::CSR) .Default(Triple::UnknownVendor); } static Triple::OSType parseOS(StringRef OSName) { return StringSwitch<Triple::OSType>(OSName) .StartsWith("cloudabi", Triple::CloudABI) .StartsWith("darwin", Triple::Darwin) .StartsWith("dragonfly", Triple::DragonFly) .StartsWith("freebsd", Triple::FreeBSD) .StartsWith("ios", Triple::IOS) .StartsWith("kfreebsd", Triple::KFreeBSD) .StartsWith("linux", Triple::Linux) .StartsWith("lv2", Triple::Lv2) .StartsWith("macosx", Triple::MacOSX) .StartsWith("netbsd", Triple::NetBSD) .StartsWith("openbsd", Triple::OpenBSD) .StartsWith("solaris", 
Triple::Solaris) .StartsWith("win32", Triple::Win32) .StartsWith("windows", Triple::Win32) .StartsWith("haiku", Triple::Haiku) .StartsWith("minix", Triple::Minix) .StartsWith("rtems", Triple::RTEMS) .StartsWith("nacl", Triple::NaCl) .StartsWith("cnk", Triple::CNK) .StartsWith("bitrig", Triple::Bitrig) .StartsWith("aix", Triple::AIX) .StartsWith("cuda", Triple::CUDA) .StartsWith("nvcl", Triple::NVCL) .StartsWith("amdhsa", Triple::AMDHSA) .Case("dx", Triple::DirectX) // HLSL Change - For DirectX this must match .StartsWith("ps4", Triple::PS4) .Default(Triple::UnknownOS); } static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) { return StringSwitch<Triple::EnvironmentType>(EnvironmentName) .StartsWith("eabihf", Triple::EABIHF) .StartsWith("eabi", Triple::EABI) .StartsWith("gnueabihf", Triple::GNUEABIHF) .StartsWith("gnueabi", Triple::GNUEABI) .StartsWith("gnux32", Triple::GNUX32) .StartsWith("code16", Triple::CODE16) .StartsWith("gnu", Triple::GNU) .StartsWith("android", Triple::Android) .StartsWith("msvc", Triple::MSVC) .StartsWith("itanium", Triple::Itanium) .StartsWith("cygnus", Triple::Cygnus) .Default(Triple::UnknownEnvironment); } static Triple::ObjectFormatType parseFormat(StringRef EnvironmentName) { return StringSwitch<Triple::ObjectFormatType>(EnvironmentName) .EndsWith("coff", Triple::COFF) .EndsWith("elf", Triple::ELF) .EndsWith("macho", Triple::MachO) .Default(Triple::UnknownObjectFormat); } static Triple::SubArchType parseSubArch(StringRef SubArchName) { StringRef ARMSubArch = ARMTargetParser::getCanonicalArchName(SubArchName); // For now, this is the small part. Early return. if (ARMSubArch.empty()) return StringSwitch<Triple::SubArchType>(SubArchName) .EndsWith("kalimba3", Triple::KalimbaSubArch_v3) .EndsWith("kalimba4", Triple::KalimbaSubArch_v4) .EndsWith("kalimba5", Triple::KalimbaSubArch_v5) .Default(Triple::NoSubArch); // ARM sub arch. 
switch(ARMTargetParser::parseArch(ARMSubArch)) { case ARM::AK_ARMV4: return Triple::NoSubArch; case ARM::AK_ARMV4T: return Triple::ARMSubArch_v4t; case ARM::AK_ARMV5: case ARM::AK_ARMV5T: case ARM::AK_ARMV5E: return Triple::ARMSubArch_v5; case ARM::AK_ARMV5TE: case ARM::AK_IWMMXT: case ARM::AK_IWMMXT2: case ARM::AK_XSCALE: case ARM::AK_ARMV5TEJ: return Triple::ARMSubArch_v5te; case ARM::AK_ARMV6: case ARM::AK_ARMV6J: case ARM::AK_ARMV6Z: return Triple::ARMSubArch_v6; case ARM::AK_ARMV6K: case ARM::AK_ARMV6ZK: case ARM::AK_ARMV6HL: return Triple::ARMSubArch_v6k; case ARM::AK_ARMV6T2: return Triple::ARMSubArch_v6t2; case ARM::AK_ARMV6M: case ARM::AK_ARMV6SM: return Triple::ARMSubArch_v6m; case ARM::AK_ARMV7: case ARM::AK_ARMV7A: case ARM::AK_ARMV7R: case ARM::AK_ARMV7L: case ARM::AK_ARMV7HL: return Triple::ARMSubArch_v7; case ARM::AK_ARMV7M: return Triple::ARMSubArch_v7m; case ARM::AK_ARMV7S: return Triple::ARMSubArch_v7s; case ARM::AK_ARMV7EM: return Triple::ARMSubArch_v7em; case ARM::AK_ARMV8A: return Triple::ARMSubArch_v8; case ARM::AK_ARMV8_1A: return Triple::ARMSubArch_v8_1a; default: return Triple::NoSubArch; } } static const char *getObjectFormatTypeName(Triple::ObjectFormatType Kind) { switch (Kind) { case Triple::UnknownObjectFormat: return ""; case Triple::COFF: return "coff"; case Triple::ELF: return "elf"; case Triple::MachO: return "macho"; } llvm_unreachable("unknown object format type"); } static Triple::ObjectFormatType getDefaultFormat(const Triple &T) { switch (T.getArch()) { default: break; case Triple::hexagon: case Triple::mips: case Triple::mipsel: case Triple::mips64: case Triple::mips64el: case Triple::r600: case Triple::amdgcn: case Triple::sparc: case Triple::sparcv9: case Triple::systemz: case Triple::xcore: case Triple::ppc64le: return Triple::ELF; case Triple::ppc: case Triple::ppc64: if (T.isOSDarwin()) return Triple::MachO; return Triple::ELF; } if (T.isOSDarwin()) return Triple::MachO; else if (T.isOSWindows()) return Triple::COFF; return Triple::ELF; } /// \brief Construct a triple from the string representation provided. /// /// This stores the string representation and parses the various pieces into /// enum members. Triple::Triple(const Twine &Str) : Data(Str.str()), Arch(parseArch(getArchName())), SubArch(parseSubArch(getArchName())), Vendor(parseVendor(getVendorName())), OS(parseOS(getOSName())), Environment(parseEnvironment(getEnvironmentName())), ObjectFormat(parseFormat(getEnvironmentName())) { if (ObjectFormat == Triple::UnknownObjectFormat) ObjectFormat = getDefaultFormat(*this); } /// \brief Construct a triple from string representations of the architecture, /// vendor, and OS. /// /// This joins each argument into a canonical string representation and parses /// them into enum members. It leaves the environment unknown and omits it from /// the string representation. Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr) : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr).str()), Arch(parseArch(ArchStr.str())), SubArch(parseSubArch(ArchStr.str())), Vendor(parseVendor(VendorStr.str())), OS(parseOS(OSStr.str())), Environment(), ObjectFormat(Triple::UnknownObjectFormat) { ObjectFormat = getDefaultFormat(*this); } /// \brief Construct a triple from string representations of the architecture, /// vendor, OS, and environment. /// /// This joins each argument into a canonical string representation and parses /// them into enum members. 
Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr, const Twine &EnvironmentStr) : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr + Twine('-') + EnvironmentStr).str()), Arch(parseArch(ArchStr.str())), SubArch(parseSubArch(ArchStr.str())), Vendor(parseVendor(VendorStr.str())), OS(parseOS(OSStr.str())), Environment(parseEnvironment(EnvironmentStr.str())), ObjectFormat(parseFormat(EnvironmentStr.str())) { if (ObjectFormat == Triple::UnknownObjectFormat) ObjectFormat = getDefaultFormat(*this); } std::string Triple::normalize(StringRef Str) { bool IsMinGW32 = false; bool IsCygwin = false; // Parse into components. SmallVector<StringRef, 4> Components; Str.split(Components, "-"); // If the first component corresponds to a known architecture, preferentially // use it for the architecture. If the second component corresponds to a // known vendor, preferentially use it for the vendor, etc. This avoids silly // component movement when a component parses as (eg) both a valid arch and a // valid os. ArchType Arch = UnknownArch; if (Components.size() > 0) Arch = parseArch(Components[0]); VendorType Vendor = UnknownVendor; if (Components.size() > 1) Vendor = parseVendor(Components[1]); OSType OS = UnknownOS; if (Components.size() > 2) { OS = parseOS(Components[2]); IsCygwin = Components[2].startswith("cygwin"); IsMinGW32 = Components[2].startswith("mingw"); } EnvironmentType Environment = UnknownEnvironment; if (Components.size() > 3) Environment = parseEnvironment(Components[3]); ObjectFormatType ObjectFormat = UnknownObjectFormat; if (Components.size() > 4) ObjectFormat = parseFormat(Components[4]); // Note which components are already in their final position. These will not // be moved. bool Found[4]; Found[0] = Arch != UnknownArch; Found[1] = Vendor != UnknownVendor; Found[2] = OS != UnknownOS; Found[3] = Environment != UnknownEnvironment; // If they are not there already, permute the components into their canonical // positions by seeing if they parse as a valid architecture, and if so moving // the component to the architecture position etc. for (unsigned Pos = 0; Pos != array_lengthof(Found); ++Pos) { if (Found[Pos]) continue; // Already in the canonical position. for (unsigned Idx = 0; Idx != Components.size(); ++Idx) { // Do not reparse any components that already matched. if (Idx < array_lengthof(Found) && Found[Idx]) continue; // Does this component parse as valid for the target position? bool Valid = false; StringRef Comp = Components[Idx]; switch (Pos) { default: llvm_unreachable("unexpected component type!"); case 0: Arch = parseArch(Comp); Valid = Arch != UnknownArch; break; case 1: Vendor = parseVendor(Comp); Valid = Vendor != UnknownVendor; break; case 2: OS = parseOS(Comp); IsCygwin = Comp.startswith("cygwin"); IsMinGW32 = Comp.startswith("mingw"); Valid = OS != UnknownOS || IsCygwin || IsMinGW32; break; case 3: Environment = parseEnvironment(Comp); Valid = Environment != UnknownEnvironment; if (!Valid) { ObjectFormat = parseFormat(Comp); Valid = ObjectFormat != UnknownObjectFormat; } break; } if (!Valid) continue; // Nope, try the next component. // Move the component to the target position, pushing any non-fixed // components that are in the way to the right. This tends to give // good results in the common cases of a forgotten vendor component // or a wrongly positioned environment. if (Pos < Idx) { // Insert left, pushing the existing components to the right. For // example, a-b-i386 -> i386-a-b when moving i386 to the front. 
StringRef CurrentComponent(""); // The empty component. // Replace the component we are moving with an empty component. std::swap(CurrentComponent, Components[Idx]); // Insert the component being moved at Pos, displacing any existing // components to the right. for (unsigned i = Pos; !CurrentComponent.empty(); ++i) { // Skip over any fixed components. while (i < array_lengthof(Found) && Found[i]) ++i; // Place the component at the new position, getting the component // that was at this position - it will be moved right. std::swap(CurrentComponent, Components[i]); } } else if (Pos > Idx) { // Push right by inserting empty components until the component at Idx // reaches the target position Pos. For example, pc-a -> -pc-a when // moving pc to the second position. do { // Insert one empty component at Idx. StringRef CurrentComponent(""); // The empty component. for (unsigned i = Idx; i < Components.size();) { // Place the component at the new position, getting the component // that was at this position - it will be moved right. std::swap(CurrentComponent, Components[i]); // If it was placed on top of an empty component then we are done. if (CurrentComponent.empty()) break; // Advance to the next component, skipping any fixed components. while (++i < array_lengthof(Found) && Found[i]) ; } // The last component was pushed off the end - append it. if (!CurrentComponent.empty()) Components.push_back(CurrentComponent); // Advance Idx to the component's new position. while (++Idx < array_lengthof(Found) && Found[Idx]) ; } while (Idx < Pos); // Add more until the final position is reached. } assert(Pos < Components.size() && Components[Pos] == Comp && "Component moved wrong!"); Found[Pos] = true; break; } } // Special case logic goes here. At this point Arch, Vendor and OS have the // correct values for the computed components. std::string NormalizedEnvironment; if (Environment == Triple::Android && Components[3].startswith("androideabi")) { StringRef AndroidVersion = Components[3].drop_front(strlen("androideabi")); if (AndroidVersion.empty()) { Components[3] = "android"; } else { NormalizedEnvironment = Twine("android", AndroidVersion).str(); Components[3] = NormalizedEnvironment; } } if (OS == Triple::Win32) { Components.resize(4); Components[2] = "windows"; if (Environment == UnknownEnvironment) { if (ObjectFormat == UnknownObjectFormat || ObjectFormat == Triple::COFF) Components[3] = "msvc"; else Components[3] = getObjectFormatTypeName(ObjectFormat); } } else if (IsMinGW32) { Components.resize(4); Components[2] = "windows"; Components[3] = "gnu"; } else if (IsCygwin) { Components.resize(4); Components[2] = "windows"; Components[3] = "cygnus"; } if (IsMinGW32 || IsCygwin || (OS == Triple::Win32 && Environment != UnknownEnvironment)) { if (ObjectFormat != UnknownObjectFormat && ObjectFormat != Triple::COFF) { Components.resize(5); Components[4] = getObjectFormatTypeName(ObjectFormat); } } // Stick the corrected components back together to form the normalized string. 
std::string Normalized;
  for (unsigned i = 0, e = Components.size(); i != e; ++i) {
    if (i) Normalized += '-';
    Normalized += Components[i];
  }
  return Normalized;
}

StringRef Triple::getArchName() const {
  return StringRef(Data).split('-').first;           // Isolate first component
}

StringRef Triple::getVendorName() const {
  StringRef Tmp = StringRef(Data).split('-').second; // Strip first component
  return Tmp.split('-').first;                       // Isolate second component
}

StringRef Triple::getOSName() const {
  StringRef Tmp = StringRef(Data).split('-').second; // Strip first component
  Tmp = Tmp.split('-').second;                       // Strip second component
  return Tmp.split('-').first;                       // Isolate third component
}

StringRef Triple::getEnvironmentName() const {
  StringRef Tmp = StringRef(Data).split('-').second; // Strip first component
  Tmp = Tmp.split('-').second;                       // Strip second component
  return Tmp.split('-').second;                      // Strip third component
}

StringRef Triple::getOSAndEnvironmentName() const {
  StringRef Tmp = StringRef(Data).split('-').second; // Strip first component
  return Tmp.split('-').second;                      // Strip second component
}

static unsigned EatNumber(StringRef &Str) {
  assert(!Str.empty() && Str[0] >= '0' && Str[0] <= '9' && "Not a number");
  unsigned Result = 0;

  do {
    // Consume the leading digit.
    Result = Result*10 + (Str[0] - '0');

    // Eat the digit.
    Str = Str.substr(1);
  } while (!Str.empty() && Str[0] >= '0' && Str[0] <= '9');

  return Result;
}

static void parseVersionFromName(StringRef Name, unsigned &Major,
                                 unsigned &Minor, unsigned &Micro) {
  // Any unset version defaults to 0.
  Major = Minor = Micro = 0;

  // Parse up to three components.
  unsigned *Components[3] = {&Major, &Minor, &Micro};
  for (unsigned i = 0; i != 3; ++i) {
    if (Name.empty() || Name[0] < '0' || Name[0] > '9')
      break;

    // Consume the leading number.
    *Components[i] = EatNumber(Name);

    // Consume the separator, if present.
    if (Name.startswith("."))
      Name = Name.substr(1);
  }
}

void Triple::getEnvironmentVersion(unsigned &Major, unsigned &Minor,
                                   unsigned &Micro) const {
  StringRef EnvironmentName = getEnvironmentName();
  StringRef EnvironmentTypeName = getEnvironmentTypeName(getEnvironment());
  if (EnvironmentName.startswith(EnvironmentTypeName))
    EnvironmentName = EnvironmentName.substr(EnvironmentTypeName.size());

  parseVersionFromName(EnvironmentName, Major, Minor, Micro);
}

void Triple::getOSVersion(unsigned &Major, unsigned &Minor,
                          unsigned &Micro) const {
  StringRef OSName = getOSName();
  // Assume that the OS portion of the triple starts with the canonical name.
  StringRef OSTypeName = getOSTypeName(getOS());
  if (OSName.startswith(OSTypeName))
    OSName = OSName.substr(OSTypeName.size());

  parseVersionFromName(OSName, Major, Minor, Micro);
}

bool Triple::getMacOSXVersion(unsigned &Major, unsigned &Minor,
                              unsigned &Micro) const {
  getOSVersion(Major, Minor, Micro);

  switch (getOS()) {
  default: llvm_unreachable("unexpected OS for Darwin triple");
  case Darwin:
    // Default to darwin8, i.e., MacOSX 10.4.
    if (Major == 0)
      Major = 8;
    // Darwin version numbers are skewed from OS X versions.
    if (Major < 4)
      return false;
    Micro = 0;
    Minor = Major - 4;
    Major = 10;
    break;
  case MacOSX:
    // Default to 10.4.
    if (Major == 0) {
      Major = 10;
      Minor = 4;
    }
    if (Major != 10)
      return false;
    break;
  case IOS:
    // Ignore the version from the triple.  This is only handled because
    // the clang driver combines OS X and IOS support into a common Darwin
    // toolchain that wants to know the OS X version number even when
    // targeting IOS.
    Major = 10;
    Minor = 4;
    Micro = 0;
    break;
  }
  return true;
}

void Triple::getiOSVersion(unsigned &Major, unsigned &Minor,
                           unsigned &Micro) const {
  switch (getOS()) {
  default: llvm_unreachable("unexpected OS for Darwin triple");
  case Darwin:
  case MacOSX:
    // Ignore the version from the triple.  This is only handled because
    // the clang driver combines OS X and IOS support into a common Darwin
    // toolchain that wants to know the iOS version number even when
    // targeting OS X.
    Major = 5;
    Minor = 0;
    Micro = 0;
    break;
  case IOS:
    getOSVersion(Major, Minor, Micro);
    // Default to 5.0 (or 7.0 for arm64).
    if (Major == 0)
      Major = (getArch() == aarch64) ? 7 : 5;
    break;
  }
}

void Triple::setTriple(const Twine &Str) {
  *this = Triple(Str);
}

void Triple::setArch(ArchType Kind) {
  setArchName(getArchTypeName(Kind));
}

void Triple::setVendor(VendorType Kind) {
  setVendorName(getVendorTypeName(Kind));
}

void Triple::setOS(OSType Kind) {
  setOSName(getOSTypeName(Kind));
}

void Triple::setEnvironment(EnvironmentType Kind) {
  if (ObjectFormat == getDefaultFormat(*this))
    return setEnvironmentName(getEnvironmentTypeName(Kind));

  setEnvironmentName((getEnvironmentTypeName(Kind) + Twine("-") +
                      getObjectFormatTypeName(ObjectFormat)).str());
}

void Triple::setObjectFormat(ObjectFormatType Kind) {
  if (Environment == UnknownEnvironment)
    return setEnvironmentName(getObjectFormatTypeName(Kind));

  setEnvironmentName((getEnvironmentTypeName(Environment) + Twine("-") +
                      getObjectFormatTypeName(Kind)).str());
}

void Triple::setArchName(StringRef Str) {
  // Work around a miscompilation bug for Twines in gcc 4.0.3.
  SmallString<64> Triple;
  Triple += Str;
  Triple += "-";
  Triple += getVendorName();
  Triple += "-";
  Triple += getOSAndEnvironmentName();
  setTriple(Triple);
}

void Triple::setVendorName(StringRef Str) {
  setTriple(getArchName() + "-" + Str + "-" + getOSAndEnvironmentName());
}

void Triple::setOSName(StringRef Str) {
  if (hasEnvironment())
    setTriple(getArchName() + "-" + getVendorName() + "-" + Str +
              "-" + getEnvironmentName());
  else
    setTriple(getArchName() + "-" + getVendorName() + "-" + Str);
}

void Triple::setEnvironmentName(StringRef Str) {
  setTriple(getArchName() + "-" + getVendorName() + "-" + getOSName() +
            "-" + Str);
}

void Triple::setOSAndEnvironmentName(StringRef Str) {
  setTriple(getArchName() + "-" + getVendorName() + "-" + Str);
}

static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
  switch (Arch) {
  case llvm::Triple::UnknownArch:
    return 0;

  case llvm::Triple::msp430:
    return 16;

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::hexagon:
  case llvm::Triple::le32:
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::nvptx:
  case llvm::Triple::ppc:
  case llvm::Triple::r600:
  case llvm::Triple::sparc:
  case llvm::Triple::sparcel:
  case llvm::Triple::tce:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
  case llvm::Triple::x86:
  case llvm::Triple::xcore:
  case llvm::Triple::amdil:
  case llvm::Triple::hsail:
  case llvm::Triple::spir:
  case llvm::Triple::dxil: // HLSL Change
  case llvm::Triple::kalimba:
  case llvm::Triple::shave:
  case llvm::Triple::wasm32:
    return 32;

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
  case llvm::Triple::amdgcn:
  case llvm::Triple::bpfel:
  case llvm::Triple::bpfeb:
  case llvm::Triple::le64:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
  case llvm::Triple::nvptx64:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
  case llvm::Triple::sparcv9:
  case llvm::Triple::systemz:
  case llvm::Triple::x86_64:
  case llvm::Triple::amdil64:
  case
llvm::Triple::hsail64: case llvm::Triple::spir64: case llvm::Triple::dxil64: // HLSL Change case llvm::Triple::wasm64: return 64; } llvm_unreachable("Invalid architecture value"); } bool Triple::isArch64Bit() const { return getArchPointerBitWidth(getArch()) == 64; } bool Triple::isArch32Bit() const { return getArchPointerBitWidth(getArch()) == 32; } bool Triple::isArch16Bit() const { return getArchPointerBitWidth(getArch()) == 16; } Triple Triple::get32BitArchVariant() const { Triple T(*this); switch (getArch()) { case Triple::UnknownArch: case Triple::aarch64: case Triple::aarch64_be: case Triple::amdgcn: case Triple::bpfel: case Triple::bpfeb: case Triple::msp430: case Triple::systemz: case Triple::ppc64le: T.setArch(UnknownArch); break; case Triple::amdil: case Triple::hsail: case Triple::spir: case Triple::dxil: // HLSL Change case Triple::arm: case Triple::armeb: case Triple::hexagon: case Triple::kalimba: case Triple::le32: case Triple::mips: case Triple::mipsel: case Triple::nvptx: case Triple::ppc: case Triple::r600: case Triple::sparc: case Triple::sparcel: case Triple::tce: case Triple::thumb: case Triple::thumbeb: case Triple::x86: case Triple::xcore: case Triple::shave: case Triple::wasm32: // Already 32-bit. break; case Triple::le64: T.setArch(Triple::le32); break; case Triple::mips64: T.setArch(Triple::mips); break; case Triple::mips64el: T.setArch(Triple::mipsel); break; case Triple::nvptx64: T.setArch(Triple::nvptx); break; case Triple::ppc64: T.setArch(Triple::ppc); break; case Triple::sparcv9: T.setArch(Triple::sparc); break; case Triple::x86_64: T.setArch(Triple::x86); break; case Triple::amdil64: T.setArch(Triple::amdil); break; case Triple::hsail64: T.setArch(Triple::hsail); break; case Triple::spir64: T.setArch(Triple::spir); break; case Triple::dxil64: T.setArch(Triple::dxil); break; // HLSL Change case Triple::wasm64: T.setArch(Triple::wasm32); break; } return T; } Triple Triple::get64BitArchVariant() const { Triple T(*this); switch (getArch()) { case Triple::UnknownArch: case Triple::arm: case Triple::armeb: case Triple::hexagon: case Triple::kalimba: case Triple::msp430: case Triple::r600: case Triple::tce: case Triple::thumb: case Triple::thumbeb: case Triple::xcore: case Triple::sparcel: case Triple::shave: T.setArch(UnknownArch); break; case Triple::aarch64: case Triple::aarch64_be: case Triple::bpfel: case Triple::bpfeb: case Triple::le64: case Triple::amdil64: case Triple::amdgcn: case Triple::hsail64: case Triple::spir64: case Triple::dxil64: // HLSL Change case Triple::mips64: case Triple::mips64el: case Triple::nvptx64: case Triple::ppc64: case Triple::ppc64le: case Triple::sparcv9: case Triple::systemz: case Triple::x86_64: case Triple::wasm64: // Already 64-bit. 
break; case Triple::le32: T.setArch(Triple::le64); break; case Triple::mips: T.setArch(Triple::mips64); break; case Triple::mipsel: T.setArch(Triple::mips64el); break; case Triple::nvptx: T.setArch(Triple::nvptx64); break; case Triple::ppc: T.setArch(Triple::ppc64); break; case Triple::sparc: T.setArch(Triple::sparcv9); break; case Triple::x86: T.setArch(Triple::x86_64); break; case Triple::amdil: T.setArch(Triple::amdil64); break; case Triple::hsail: T.setArch(Triple::hsail64); break; case Triple::spir: T.setArch(Triple::spir64); break; case Triple::dxil: T.setArch(Triple::dxil64); break; // HLSL Change case Triple::wasm32: T.setArch(Triple::wasm64); break; } return T; } Triple Triple::getBigEndianArchVariant() const { Triple T(*this); switch (getArch()) { case Triple::UnknownArch: case Triple::amdgcn: case Triple::amdil64: case Triple::amdil: case Triple::hexagon: case Triple::hsail64: case Triple::hsail: case Triple::kalimba: case Triple::le32: case Triple::le64: case Triple::msp430: case Triple::nvptx64: case Triple::nvptx: case Triple::r600: case Triple::shave: case Triple::spir64: case Triple::spir: case Triple::dxil: // HLSL Change case Triple::dxil64: // HLSL Change case Triple::wasm32: case Triple::wasm64: case Triple::x86: case Triple::x86_64: case Triple::xcore: // ARM is intentionally unsupported here, changing the architecture would // drop any arch suffixes. case Triple::arm: case Triple::thumb: T.setArch(UnknownArch); break; case Triple::aarch64_be: case Triple::armeb: case Triple::bpfeb: case Triple::mips64: case Triple::mips: case Triple::ppc64: case Triple::ppc: case Triple::sparc: case Triple::sparcv9: case Triple::systemz: case Triple::tce: case Triple::thumbeb: // Already big endian. break; case Triple::aarch64: T.setArch(Triple::aarch64_be); break; case Triple::bpfel: T.setArch(Triple::bpfeb); break; case Triple::mips64el:T.setArch(Triple::mips64); break; case Triple::mipsel: T.setArch(Triple::mips); break; case Triple::ppc64le: T.setArch(Triple::ppc64); break; case Triple::sparcel: T.setArch(Triple::sparc); break; } return T; } Triple Triple::getLittleEndianArchVariant() const { Triple T(*this); switch (getArch()) { case Triple::UnknownArch: case Triple::ppc: case Triple::sparcv9: case Triple::systemz: case Triple::tce: // ARM is intentionally unsupported here, changing the architecture would // drop any arch suffixes. case Triple::armeb: case Triple::thumbeb: T.setArch(UnknownArch); break; case Triple::aarch64: case Triple::amdgcn: case Triple::amdil64: case Triple::amdil: case Triple::arm: case Triple::bpfel: case Triple::hexagon: case Triple::hsail64: case Triple::hsail: case Triple::kalimba: case Triple::le32: case Triple::le64: case Triple::mips64el: case Triple::mipsel: case Triple::msp430: case Triple::nvptx64: case Triple::nvptx: case Triple::ppc64le: case Triple::r600: case Triple::shave: case Triple::sparcel: case Triple::spir64: case Triple::spir: case Triple::dxil: // HLSL Change case Triple::dxil64: // HLSL Change case Triple::thumb: case Triple::wasm32: case Triple::wasm64: case Triple::x86: case Triple::x86_64: case Triple::xcore: // Already little endian. 
break; case Triple::aarch64_be: T.setArch(Triple::aarch64); break; case Triple::bpfeb: T.setArch(Triple::bpfel); break; case Triple::mips64: T.setArch(Triple::mips64el); break; case Triple::mips: T.setArch(Triple::mipsel); break; case Triple::ppc64: T.setArch(Triple::ppc64le); break; case Triple::sparc: T.setArch(Triple::sparcel); break; } return T; } const char *Triple::getARMCPUForArch(StringRef MArch) const { if (MArch.empty()) MArch = getArchName(); MArch = ARMTargetParser::getCanonicalArchName(MArch); // Some defaults are forced. switch (getOS()) { case llvm::Triple::FreeBSD: case llvm::Triple::NetBSD: if (!MArch.empty() && MArch == "v6") return "arm1176jzf-s"; break; case llvm::Triple::Win32: // FIXME: this is invalid for WindowsCE return "cortex-a9"; default: break; } if (MArch.empty()) return nullptr; const char *CPU = ARMTargetParser::getDefaultCPU(MArch); if (CPU) return CPU; // If no specific architecture version is requested, return the minimum CPU // required by the OS and environment. switch (getOS()) { case llvm::Triple::NetBSD: switch (getEnvironment()) { case llvm::Triple::GNUEABIHF: case llvm::Triple::GNUEABI: case llvm::Triple::EABIHF: case llvm::Triple::EABI: return "arm926ej-s"; default: return "strongarm"; } case llvm::Triple::NaCl: return "cortex-a8"; default: switch (getEnvironment()) { case llvm::Triple::EABIHF: case llvm::Triple::GNUEABIHF: return "arm1176jzf-s"; default: return "arm7tdmi"; } } llvm_unreachable("invalid arch name"); }
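
A minimal sketch of how the routines above compose (this driver is not part of Triple.cpp and is hypothetical; it assumes this HLSL-patched tree, where "dxil-ms-dx" is the triple DXC uses for DXIL modules):

#include "llvm/ADT/Triple.h"
#include <cassert>

int main() {
  llvm::Triple T("dxil-ms-dx");
  assert(T.getArch() == llvm::Triple::dxil);        // parseArch("dxil")
  assert(T.getVendor() == llvm::Triple::Microsoft); // parseVendor("ms")
  assert(T.getOS() == llvm::Triple::DirectX);       // parseOS("dx")
  assert(T.isArch32Bit());          // getArchPointerBitWidth(dxil) == 32
  assert(T.get64BitArchVariant().getArch() == llvm::Triple::dxil64);

  // normalize() permutes recognized components into their canonical
  // slots, inserting empty ones as needed: "linux" parses as an OS, so a
  // blank vendor component is inserted before it.
  assert(llvm::Triple::normalize("x86_64-linux-gnu") == "x86_64--linux-gnu");
  return 0;
}
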
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/FileUtilities.cpp
//===- Support/FileUtilities.cpp - File System Utilities ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a family of utility functions which are useful for doing // various things with files. // //===----------------------------------------------------------------------===// #include "llvm/Support/FileUtilities.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include <cctype> #include <cstdlib> #include <cstring> #include <system_error> using namespace llvm; static bool isSignedChar(char C) { return (C == '+' || C == '-'); } static bool isExponentChar(char C) { switch (C) { case 'D': // Strange exponential notation. case 'd': // Strange exponential notation. case 'e': case 'E': return true; default: return false; } } static bool isNumberChar(char C) { switch (C) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': return true; default: return isSignedChar(C) || isExponentChar(C); } } static const char *BackupNumber(const char *Pos, const char *FirstChar) { // If we didn't stop in the middle of a number, don't backup. if (!isNumberChar(*Pos)) return Pos; // Otherwise, return to the start of the number. bool HasPeriod = false; while (Pos > FirstChar && isNumberChar(Pos[-1])) { // Backup over at most one period. if (Pos[-1] == '.') { if (HasPeriod) break; HasPeriod = true; } --Pos; if (Pos > FirstChar && isSignedChar(Pos[0]) && !isExponentChar(Pos[-1])) break; } return Pos; } /// EndOfNumber - Return the first character that is not part of the specified /// number. This assumes that the buffer is null terminated, so it won't fall /// off the end. static const char *EndOfNumber(const char *Pos) { while (isNumberChar(*Pos)) ++Pos; return Pos; } /// CompareNumbers - compare two numbers, returning true if they are different. static bool CompareNumbers(const char *&F1P, const char *&F2P, const char *F1End, const char *F2End, double AbsTolerance, double RelTolerance, std::string *ErrorMsg) { const char *F1NumEnd, *F2NumEnd; double V1 = 0.0, V2 = 0.0; // If one of the positions is at a space and the other isn't, chomp up 'til // the end of the space. while (isspace(static_cast<unsigned char>(*F1P)) && F1P != F1End) ++F1P; while (isspace(static_cast<unsigned char>(*F2P)) && F2P != F2End) ++F2P; // If we stop on numbers, compare their difference. if (!isNumberChar(*F1P) || !isNumberChar(*F2P)) { // The diff failed. F1NumEnd = F1P; F2NumEnd = F2P; } else { // Note that some ugliness is built into this to permit support for numbers // that use "D" or "d" as their exponential marker, e.g. "1.234D45". This // occurs in 200.sixtrack in spec2k. V1 = strtod(F1P, const_cast<char**>(&F1NumEnd)); V2 = strtod(F2P, const_cast<char**>(&F2NumEnd)); if (*F1NumEnd == 'D' || *F1NumEnd == 'd') { // Copy string into tmp buffer to replace the 'D' with an 'e'. SmallString<200> StrTmp(F1P, EndOfNumber(F1NumEnd)+1); // Strange exponential notation! StrTmp[static_cast<unsigned>(F1NumEnd-F1P)] = 'e'; V1 = strtod(&StrTmp[0], const_cast<char**>(&F1NumEnd)); F1NumEnd = F1P + (F1NumEnd-&StrTmp[0]); } if (*F2NumEnd == 'D' || *F2NumEnd == 'd') { // Copy string into tmp buffer to replace the 'D' with an 'e'. 
      SmallString<200> StrTmp(F2P, EndOfNumber(F2NumEnd)+1);
      // Strange exponential notation!
      StrTmp[static_cast<unsigned>(F2NumEnd-F2P)] = 'e';

      V2 = strtod(&StrTmp[0], const_cast<char**>(&F2NumEnd));
      F2NumEnd = F2P + (F2NumEnd-&StrTmp[0]);
    }
  }

  if (F1NumEnd == F1P || F2NumEnd == F2P) {
    if (ErrorMsg) {
      *ErrorMsg = "FP Comparison failed, not a numeric difference between '";
      *ErrorMsg += F1P[0];
      *ErrorMsg += "' and '";
      *ErrorMsg += F2P[0];
      *ErrorMsg += "'";
    }
    return true;
  }

  // Check to see if these are inside the absolute tolerance
  if (AbsTolerance < std::abs(V1-V2)) {
    // Nope, check the relative tolerance...
    double Diff;
    if (V2)
      Diff = std::abs(V1/V2 - 1.0);
    else if (V1)
      Diff = std::abs(V2/V1 - 1.0);
    else
      Diff = 0;  // Both zero.
    if (Diff > RelTolerance) {
      if (ErrorMsg) {
        raw_string_ostream(*ErrorMsg)
          << "Compared: " << V1 << " and " << V2 << '\n'
          << "abs. diff = " << std::abs(V1-V2)
          << " rel.diff = " << Diff << '\n'
          << "Out of tolerance: rel/abs: " << RelTolerance
          << '/' << AbsTolerance;
      }
      return true;
    }
  }

  // Otherwise, advance our read pointers to the end of the numbers.
  F1P = F1NumEnd;  F2P = F2NumEnd;
  return false;
}

/// DiffFilesWithTolerance - Compare the two files specified, returning 0 if
/// the files match, 1 if they are different, and 2 if there is a file error.
/// This function differs from DiffFiles in that you can specify an absolute
/// and relative FP error that is allowed to exist.  If you specify a string
/// to fill in for the error option, it will set the string to an error
/// message if an error occurs, allowing the caller to distinguish between a
/// failed diff and a file system error.
///
int llvm::DiffFilesWithTolerance(StringRef NameA, StringRef NameB,
                                 double AbsTol, double RelTol,
                                 std::string *Error) {
  // Now it's safe to mmap the files into memory because both files
  // have a non-zero size.
  ErrorOr<std::unique_ptr<MemoryBuffer>> F1OrErr = MemoryBuffer::getFile(NameA);
  if (std::error_code EC = F1OrErr.getError()) {
    if (Error)
      *Error = EC.message();
    return 2;
  }
  MemoryBuffer &F1 = *F1OrErr.get();

  ErrorOr<std::unique_ptr<MemoryBuffer>> F2OrErr = MemoryBuffer::getFile(NameB);
  if (std::error_code EC = F2OrErr.getError()) {
    if (Error)
      *Error = EC.message();
    return 2;
  }
  MemoryBuffer &F2 = *F2OrErr.get();

  // Okay, now that we opened the files, scan them for the first difference.
  const char *File1Start = F1.getBufferStart();
  const char *File2Start = F2.getBufferStart();
  const char *File1End = F1.getBufferEnd();
  const char *File2End = F2.getBufferEnd();
  const char *F1P = File1Start;
  const char *F2P = File2Start;
  uint64_t A_size = F1.getBufferSize();
  uint64_t B_size = F2.getBufferSize();

  // Are the buffers identical?  Common case: Handle this efficiently.
  if (A_size == B_size &&
      std::memcmp(File1Start, File2Start, A_size) == 0)
    return 0;

  // Otherwise, we are done unless tolerances are set.
  if (AbsTol == 0 && RelTol == 0) {
    if (Error)
      *Error = "Files differ without tolerance allowance";
    return 1;   // Files different!
  }

  bool CompareFailed = false;
  while (1) {
    // Scan for the end of file or next difference.
    while (F1P < File1End && F2P < File2End && *F1P == *F2P)
      ++F1P, ++F2P;

    if (F1P >= File1End || F2P >= File2End) break;

    // Okay, we must have found a difference.  Backup to the start of the
    // current number each stream is at so that we can compare from the
    // beginning.
    F1P = BackupNumber(F1P, File1Start);
    F2P = BackupNumber(F2P, File2Start);

    // Now that we are at the start of the numbers, compare them, exiting if
    // they don't match.
if (CompareNumbers(F1P, F2P, File1End, File2End, AbsTol, RelTol, Error)) { CompareFailed = true; break; } } // Okay, we reached the end of file. If both files are at the end, we // succeeded. bool F1AtEnd = F1P >= File1End; bool F2AtEnd = F2P >= File2End; if (!CompareFailed && (!F1AtEnd || !F2AtEnd)) { // Else, we might have run off the end due to a number: backup and retry. if (F1AtEnd && isNumberChar(F1P[-1])) --F1P; if (F2AtEnd && isNumberChar(F2P[-1])) --F2P; F1P = BackupNumber(F1P, File1Start); F2P = BackupNumber(F2P, File2Start); // Now that we are at the start of the numbers, compare them, exiting if // they don't match. if (CompareNumbers(F1P, F2P, File1End, File2End, AbsTol, RelTol, Error)) CompareFailed = true; // If we found the end, we succeeded. if (F1P < File1End || F2P < File2End) CompareFailed = true; } return CompareFailed; }
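
A hypothetical driver (not part of FileUtilities.cpp) showing the DiffFilesWithTolerance contract; the file names and contents described in the comments are invented for illustration:

#include "llvm/Support/FileUtilities.h"
#include <iostream>
#include <string>

int main() {
  // Suppose a.txt holds "result: 1.0000" and b.txt holds "result: 1.0001".
  // The byte-wise scan hits a difference inside the number, both sides back
  // up to the start of it (BackupNumber), and the parsed values are compared
  // against the tolerances instead.
  std::string Err;
  int Rc = llvm::DiffFilesWithTolerance("a.txt", "b.txt",
                                        /*AbsTol=*/1e-3, /*RelTol=*/0.0,
                                        &Err);
  if (Rc == 2)
    std::cerr << "file error: " << Err << '\n';   // couldn't open a file
  else
    std::cout << (Rc == 0 ? "match within tolerance" : "different") << '\n';
  return Rc;
}
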
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regengine.inc
/*- * This code is derived from OpenBSD's libc/regex, original license follows: * * Copyright (c) 1992, 1993, 1994 Henry Spencer. * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Henry Spencer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)engine.c 8.5 (Berkeley) 3/20/94 */ /* * The matching engine and friends. This file is #included by regexec.c * after suitable #defines of a variety of macros used herein, so that * different state representations can be used without duplicating masses * of code. 
*/ #ifdef SNAMES #define matcher smatcher #define fast sfast #define slow sslow #define dissect sdissect #define backref sbackref #define step sstep #define print sprint #define at sat #define match smat #define nope snope #endif #ifdef LNAMES #define matcher lmatcher #define fast lfast #define slow lslow #define dissect ldissect #define backref lbackref #define step lstep #define print lprint #define at lat #define match lmat #define nope lnope #endif /* another structure passed up and down to avoid zillions of parameters */ struct match { struct re_guts *g; int eflags; llvm_regmatch_t *pmatch; /* [nsub+1] (0 element unused) */ const char *offp; /* offsets work from here */ const char *beginp; /* start of string -- virtual NUL precedes */ const char *endp; /* end of string -- virtual NUL here */ const char *coldp; /* can be no match starting before here */ const char **lastpos; /* [nplus+1] */ STATEVARS; states st; /* current states */ states fresh; /* states for a fresh start */ states tmp; /* temporary */ states empty; /* empty set of states */ }; static int matcher(struct re_guts *, const char *, size_t, llvm_regmatch_t[], int); static const char *dissect(struct match *, const char *, const char *, sopno, sopno); static const char *backref(struct match *, const char *, const char *, sopno, sopno, sopno, int); static const char *fast(struct match *, const char *, const char *, sopno, sopno); static const char *slow(struct match *, const char *, const char *, sopno, sopno); static states step(struct re_guts *, sopno, sopno, states, int, states); #define MAX_RECURSION 100 #define BOL (OUT+1) #define EOL (BOL+1) #define BOLEOL (BOL+2) #define NOTHING (BOL+3) #define BOW (BOL+4) #define EOW (BOL+5) #define CODEMAX (BOL+5) /* highest code used */ #define NONCHAR(c) ((c) > CHAR_MAX) #define NNONCHAR (CODEMAX-CHAR_MAX) #ifdef REDEBUG static void print(struct match *, char *, states, int, FILE *); #endif #ifdef REDEBUG static void at(struct match *, char *, char *, char *, sopno, sopno); #endif #ifdef REDEBUG static char *pchar(int); #endif #ifdef REDEBUG #define SP(t, s, c) print(m, t, s, c, stdout) #define AT(t, p1, p2, s1, s2) at(m, t, p1, p2, s1, s2) #define NOTE(str) { if (m->eflags&REG_TRACE) (void)printf("=%s\n", (str)); } static int nope = 0; #else #define SP(t, s, c) /* nothing */ #define AT(t, p1, p2, s1, s2) /* nothing */ #define NOTE(s) /* nothing */ #endif /* - matcher - the actual matching engine */ static int /* 0 success, REG_NOMATCH failure */ matcher(struct re_guts *g, const char *string, size_t nmatch, llvm_regmatch_t pmatch[], int eflags) { const char *endp; size_t i; struct match mv; struct match *m = &mv; const char *dp; const sopno gf = g->firststate+1; /* +1 for OEND */ const sopno gl = g->laststate; const char *start; const char *stop; /* simplify the situation where possible */ if (g->cflags&REG_NOSUB) nmatch = 0; if (eflags&REG_STARTEND) { start = string + pmatch[0].rm_so; stop = string + pmatch[0].rm_eo; } else { start = string; stop = start + strlen(start); } if (stop < start) return(REG_INVARG); /* prescreening; this does wonders for this rather slow code */ if (g->must != NULL) { for (dp = start; dp < stop; dp++) if (*dp == g->must[0] && stop - dp >= g->mlen && memcmp(dp, g->must, (size_t)g->mlen) == 0) break; if (dp == stop) /* we didn't find g->must */ return(REG_NOMATCH); } /* match struct setup */ m->g = g; m->eflags = eflags; m->pmatch = NULL; m->lastpos = NULL; m->offp = string; m->beginp = start; m->endp = stop; STATESETUP(m, 4); SETUP(m->st); 
SETUP(m->fresh); SETUP(m->tmp); SETUP(m->empty); CLEAR(m->empty); /* this loop does only one repetition except for backrefs */ for (;;) { endp = fast(m, start, stop, gf, gl); if (endp == NULL) { /* a miss */ regex_free(m->pmatch); // HLSL Change: Use custom allocator regex_free((void*)m->lastpos); // HLSL Change: Use custom allocator STATETEARDOWN(m); return(REG_NOMATCH); } if (nmatch == 0 && !g->backrefs) break; /* no further info needed */ /* where? */ assert(m->coldp != NULL); for (;;) { NOTE("finding start"); endp = slow(m, m->coldp, stop, gf, gl); if (endp != NULL) break; assert(m->coldp < m->endp); m->coldp++; } if (nmatch == 1 && !g->backrefs) break; /* no further info needed */ /* oh my, they want the subexpressions... */ if (m->pmatch == NULL) m->pmatch = (llvm_regmatch_t *)regex_malloc((m->g->nsub + 1) * // HLSL Change: Use custom allocator sizeof(llvm_regmatch_t)); if (m->pmatch == NULL) { STATETEARDOWN(m); return(REG_ESPACE); } for (i = 1; i <= m->g->nsub; i++) m->pmatch[i].rm_so = m->pmatch[i].rm_eo = -1; if (!g->backrefs && !(m->eflags&REG_BACKR)) { NOTE("dissecting"); dp = dissect(m, m->coldp, endp, gf, gl); } else { if (g->nplus > 0 && m->lastpos == NULL) m->lastpos = (const char **)regex_malloc((g->nplus+1) * // HLSL Change: Use custom allocator sizeof(char *)); if (g->nplus > 0 && m->lastpos == NULL) { regex_free(m->pmatch); // HLSL Change: Use custom allocator STATETEARDOWN(m); return(REG_ESPACE); } NOTE("backref dissect"); dp = backref(m, m->coldp, endp, gf, gl, (sopno)0, 0); } if (dp != NULL) break; /* uh-oh... we couldn't find a subexpression-level match */ assert(g->backrefs); /* must be back references doing it */ assert(g->nplus == 0 || m->lastpos != NULL); for (;;) { if (dp != NULL || endp <= m->coldp) break; /* defeat */ NOTE("backoff"); endp = slow(m, m->coldp, endp-1, gf, gl); if (endp == NULL) break; /* defeat */ /* try it on a shorter possibility */ #ifndef NDEBUG for (i = 1; i <= m->g->nsub; i++) { assert(m->pmatch[i].rm_so == -1); assert(m->pmatch[i].rm_eo == -1); } #endif NOTE("backoff dissect"); dp = backref(m, m->coldp, endp, gf, gl, (sopno)0, 0); } assert(dp == NULL || dp == endp); if (dp != NULL) /* found a shorter one */ break; /* despite initial appearances, there is no match here */ NOTE("false alarm"); if (m->coldp == stop) break; start = m->coldp + 1; /* recycle starting later */ } /* fill in the details if requested */ if (nmatch > 0) { pmatch[0].rm_so = m->coldp - m->offp; pmatch[0].rm_eo = endp - m->offp; } if (nmatch > 1) { assert(m->pmatch != NULL); for (i = 1; i < nmatch; i++) if (i <= m->g->nsub) pmatch[i] = m->pmatch[i]; else { pmatch[i].rm_so = -1; pmatch[i].rm_eo = -1; } } if (m->pmatch != NULL) regex_free((char *)m->pmatch); // HLSL Change: Use custom allocator if (m->lastpos != NULL) regex_free((char *)m->lastpos); // HLSL Change: Use custom allocator STATETEARDOWN(m); return(0); } /* - dissect - figure out what matched what, no back references */ static const char * /* == stop (success) always */ dissect(struct match *m, const char *start, const char *stop, sopno startst, sopno stopst) { int i; sopno ss; /* start sop of current subRE */ sopno es; /* end sop of current subRE */ const char *sp; /* start of string matched by it */ const char *stp; /* string matched by it cannot pass here */ const char *rest; /* start of rest of string */ const char *tail; /* string unmatched by rest of RE */ sopno ssub; /* start sop of subsubRE */ sopno esub; /* end sop of subsubRE */ const char *ssp; /* start of string matched by subsubRE */ const char 
*sep; /* end of string matched by subsubRE */ const char *oldssp; /* previous ssp */ AT("diss", start, stop, startst, stopst); sp = start; for (ss = startst; ss < stopst; ss = es) { /* identify end of subRE */ es = ss; switch (OP(m->g->strip[es])) { case OPLUS_: case OQUEST_: es += OPND(m->g->strip[es]); break; case OCH_: while (OP(m->g->strip[es]) != O_CH) es += OPND(m->g->strip[es]); break; } es++; /* figure out what it matched */ switch (OP(m->g->strip[ss])) { case OEND: assert(nope); break; case OCHAR: sp++; break; case OBOL: case OEOL: case OBOW: case OEOW: break; case OANY: case OANYOF: sp++; break; case OBACK_: case O_BACK: assert(nope); break; /* cases where length of match is hard to find */ case OQUEST_: stp = stop; for (;;) { /* how long could this one be? */ rest = slow(m, sp, stp, ss, es); assert(rest != NULL); /* it did match */ /* could the rest match the rest? */ tail = slow(m, rest, stop, es, stopst); if (tail == stop) break; /* yes! */ /* no -- try a shorter match for this one */ stp = rest - 1; assert(stp >= sp); /* it did work */ } ssub = ss + 1; esub = es - 1; /* did innards match? */ if (slow(m, sp, rest, ssub, esub) != NULL) { const char *dp = dissect(m, sp, rest, ssub, esub); (void)dp; /* avoid warning if assertions off */ assert(dp == rest); } else /* no */ assert(sp == rest); sp = rest; break; case OPLUS_: stp = stop; for (;;) { /* how long could this one be? */ rest = slow(m, sp, stp, ss, es); assert(rest != NULL); /* it did match */ /* could the rest match the rest? */ tail = slow(m, rest, stop, es, stopst); if (tail == stop) break; /* yes! */ /* no -- try a shorter match for this one */ stp = rest - 1; assert(stp >= sp); /* it did work */ } ssub = ss + 1; esub = es - 1; ssp = sp; oldssp = ssp; for (;;) { /* find last match of innards */ sep = slow(m, ssp, rest, ssub, esub); if (sep == NULL || sep == ssp) break; /* failed or matched null */ oldssp = ssp; /* on to next try */ ssp = sep; } if (sep == NULL) { /* last successful match */ sep = ssp; ssp = oldssp; } assert(sep == rest); /* must exhaust substring */ assert(slow(m, ssp, sep, ssub, esub) == rest); { const char *dp = dissect(m, ssp, sep, ssub, esub); (void)dp; /* avoid warning if assertions off */ assert(dp == sep); } sp = rest; break; case OCH_: stp = stop; for (;;) { /* how long could this one be? */ rest = slow(m, sp, stp, ss, es); assert(rest != NULL); /* it did match */ /* could the rest match the rest? */ tail = slow(m, rest, stop, es, stopst); if (tail == stop) break; /* yes! 
*/ /* no -- try a shorter match for this one */ stp = rest - 1; assert(stp >= sp); /* it did work */ } ssub = ss + 1; esub = ss + OPND(m->g->strip[ss]) - 1; assert(OP(m->g->strip[esub]) == OOR1); for (;;) { /* find first matching branch */ if (slow(m, sp, rest, ssub, esub) == rest) break; /* it matched all of it */ /* that one missed, try next one */ assert(OP(m->g->strip[esub]) == OOR1); esub++; assert(OP(m->g->strip[esub]) == OOR2); ssub = esub + 1; esub += OPND(m->g->strip[esub]); if (OP(m->g->strip[esub]) == OOR2) esub--; else assert(OP(m->g->strip[esub]) == O_CH); } { const char *dp = dissect(m, sp, rest, ssub, esub); (void)dp; /* avoid warning if assertions off */ assert(dp == rest); } sp = rest; break; case O_PLUS: case O_QUEST: case OOR1: case OOR2: case O_CH: assert(nope); break; case OLPAREN: i = OPND(m->g->strip[ss]); assert(0 < i && i <= m->g->nsub); m->pmatch[i].rm_so = sp - m->offp; break; case ORPAREN: i = OPND(m->g->strip[ss]); assert(0 < i && i <= m->g->nsub); m->pmatch[i].rm_eo = sp - m->offp; break; default: /* uh oh */ assert(nope); break; } } assert(sp == stop); return(sp); } /* - backref - figure out what matched what, figuring in back references */ static const char * /* == stop (success) or NULL (failure) */ backref(struct match *m, const char *start, const char *stop, sopno startst, sopno stopst, sopno lev, int rec) /* PLUS nesting level */ { int i; sopno ss; /* start sop of current subRE */ const char *sp; /* start of string matched by it */ sopno ssub; /* start sop of subsubRE */ sopno esub; /* end sop of subsubRE */ const char *ssp; /* start of string matched by subsubRE */ const char *dp; size_t len; int hard; sop s; llvm_regoff_t offsave; cset *cs; AT("back", start, stop, startst, stopst); sp = start; /* get as far as we can with easy stuff */ hard = 0; for (ss = startst; !hard && ss < stopst; ss++) switch (OP(s = m->g->strip[ss])) { case OCHAR: if (sp == stop || *sp++ != (char)OPND(s)) return(NULL); break; case OANY: if (sp == stop) return(NULL); sp++; break; case OANYOF: cs = &m->g->sets[OPND(s)]; if (sp == stop || !CHIN(cs, *sp++)) return(NULL); break; case OBOL: if ( (sp == m->beginp && !(m->eflags&REG_NOTBOL)) || (sp < m->endp && *(sp-1) == '\n' && (m->g->cflags&REG_NEWLINE)) ) { /* yes */ } else return(NULL); break; case OEOL: if ( (sp == m->endp && !(m->eflags&REG_NOTEOL)) || (sp < m->endp && *sp == '\n' && (m->g->cflags&REG_NEWLINE)) ) { /* yes */ } else return(NULL); break; case OBOW: if (( (sp == m->beginp && !(m->eflags&REG_NOTBOL)) || (sp < m->endp && *(sp-1) == '\n' && (m->g->cflags&REG_NEWLINE)) || (sp > m->beginp && !ISWORD(*(sp-1))) ) && (sp < m->endp && ISWORD(*sp)) ) { /* yes */ } else return(NULL); break; case OEOW: if (( (sp == m->endp && !(m->eflags&REG_NOTEOL)) || (sp < m->endp && *sp == '\n' && (m->g->cflags&REG_NEWLINE)) || (sp < m->endp && !ISWORD(*sp)) ) && (sp > m->beginp && ISWORD(*(sp-1))) ) { /* yes */ } else return(NULL); break; case O_QUEST: break; case OOR1: /* matches null but needs to skip */ ss++; s = m->g->strip[ss]; do { assert(OP(s) == OOR2); ss += OPND(s); } while (OP(s = m->g->strip[ss]) != O_CH); /* note that the ss++ gets us past the O_CH */ break; default: /* have to make a choice */ hard = 1; break; } if (!hard) { /* that was it! 
*/ if (sp != stop) return(NULL); return(sp); } ss--; /* adjust for the for's final increment */ /* the hard stuff */ AT("hard", sp, stop, ss, stopst); s = m->g->strip[ss]; switch (OP(s)) { case OBACK_: /* the vilest depths */ i = OPND(s); assert(0 < i && i <= m->g->nsub); if (m->pmatch[i].rm_eo == -1) return(NULL); assert(m->pmatch[i].rm_so != -1); len = m->pmatch[i].rm_eo - m->pmatch[i].rm_so; if (len == 0 && rec++ > MAX_RECURSION) return(NULL); assert(stop - m->beginp >= len); if (sp > stop - len) return(NULL); /* not enough left to match */ ssp = m->offp + m->pmatch[i].rm_so; if (memcmp(sp, ssp, len) != 0) return(NULL); while (m->g->strip[ss] != SOP(O_BACK, i)) ss++; return(backref(m, sp+len, stop, ss+1, stopst, lev, rec)); break; case OQUEST_: /* to null or not */ dp = backref(m, sp, stop, ss+1, stopst, lev, rec); if (dp != NULL) return(dp); /* not */ return(backref(m, sp, stop, ss+OPND(s)+1, stopst, lev, rec)); break; case OPLUS_: assert(m->lastpos != NULL); assert(lev+1 <= m->g->nplus); m->lastpos[lev+1] = sp; return(backref(m, sp, stop, ss+1, stopst, lev+1, rec)); break; case O_PLUS: if (sp == m->lastpos[lev]) /* last pass matched null */ return(backref(m, sp, stop, ss+1, stopst, lev-1, rec)); /* try another pass */ m->lastpos[lev] = sp; dp = backref(m, sp, stop, ss-OPND(s)+1, stopst, lev, rec); if (dp == NULL) return(backref(m, sp, stop, ss+1, stopst, lev-1, rec)); else return(dp); break; case OCH_: /* find the right one, if any */ ssub = ss + 1; esub = ss + OPND(s) - 1; assert(OP(m->g->strip[esub]) == OOR1); for (;;) { /* find first matching branch */ dp = backref(m, sp, stop, ssub, esub, lev, rec); if (dp != NULL) return(dp); /* that one missed, try next one */ if (OP(m->g->strip[esub]) == O_CH) return(NULL); /* there is none */ esub++; assert(OP(m->g->strip[esub]) == OOR2); ssub = esub + 1; esub += OPND(m->g->strip[esub]); if (OP(m->g->strip[esub]) == OOR2) esub--; else assert(OP(m->g->strip[esub]) == O_CH); } break; case OLPAREN: /* must undo assignment if rest fails */ i = OPND(s); assert(0 < i && i <= m->g->nsub); offsave = m->pmatch[i].rm_so; m->pmatch[i].rm_so = sp - m->offp; dp = backref(m, sp, stop, ss+1, stopst, lev, rec); if (dp != NULL) return(dp); m->pmatch[i].rm_so = offsave; return(NULL); break; case ORPAREN: /* must undo assignment if rest fails */ i = OPND(s); assert(0 < i && i <= m->g->nsub); offsave = m->pmatch[i].rm_eo; m->pmatch[i].rm_eo = sp - m->offp; dp = backref(m, sp, stop, ss+1, stopst, lev, rec); if (dp != NULL) return(dp); m->pmatch[i].rm_eo = offsave; return(NULL); break; default: /* uh oh */ assert(nope); break; } /* "can't happen" */ assert(nope); /* NOTREACHED */ return NULL; } /* - fast - step through the string at top speed */ static const char * /* where tentative match ended, or NULL */ fast(struct match *m, const char *start, const char *stop, sopno startst, sopno stopst) { states st = m->st; states fresh = m->fresh; states tmp = m->tmp; const char *p = start; int c = (start == m->beginp) ? OUT : *(start-1); int lastc; /* previous c */ int flagch; int i; const char *coldp; /* last p after which no match was underway */ CLEAR(st); SET1(st, startst); st = step(m->g, startst, stopst, st, NOTHING, st); ASSIGN(fresh, st); SP("start", st, *p); coldp = NULL; for (;;) { /* next character */ lastc = c; c = (p == m->endp) ? OUT : *p; if (EQ(st, fresh)) coldp = p; /* is there an EOL and/or BOL between lastc and c? 
*/ flagch = '\0'; i = 0; if ( (lastc == '\n' && m->g->cflags&REG_NEWLINE) || (lastc == OUT && !(m->eflags&REG_NOTBOL)) ) { flagch = BOL; i = m->g->nbol; } if ( (c == '\n' && m->g->cflags&REG_NEWLINE) || (c == OUT && !(m->eflags&REG_NOTEOL)) ) { flagch = (flagch == BOL) ? BOLEOL : EOL; i += m->g->neol; } if (i != 0) { for (; i > 0; i--) st = step(m->g, startst, stopst, st, flagch, st); SP("boleol", st, c); } /* how about a word boundary? */ if ( (flagch == BOL || (lastc != OUT && !ISWORD(lastc))) && (c != OUT && ISWORD(c)) ) { flagch = BOW; } if ( (lastc != OUT && ISWORD(lastc)) && (flagch == EOL || (c != OUT && !ISWORD(c))) ) { flagch = EOW; } if (flagch == BOW || flagch == EOW) { st = step(m->g, startst, stopst, st, flagch, st); SP("boweow", st, c); } /* are we done? */ if (ISSET(st, stopst) || p == stop) break; /* NOTE BREAK OUT */ /* no, we must deal with this character */ ASSIGN(tmp, st); ASSIGN(st, fresh); assert(c != OUT); st = step(m->g, startst, stopst, tmp, c, st); SP("aft", st, c); assert(EQ(step(m->g, startst, stopst, st, NOTHING, st), st)); p++; } assert(coldp != NULL); m->coldp = coldp; if (ISSET(st, stopst)) return(p+1); else return(NULL); } /* - slow - step through the string more deliberately */ static const char * /* where it ended */ slow(struct match *m, const char *start, const char *stop, sopno startst, sopno stopst) { states st = m->st; states empty = m->empty; states tmp = m->tmp; const char *p = start; int c = (start == m->beginp) ? OUT : *(start-1); int lastc; /* previous c */ int flagch; int i; const char *matchp; /* last p at which a match ended */ AT("slow", start, stop, startst, stopst); CLEAR(st); SET1(st, startst); SP("sstart", st, *p); st = step(m->g, startst, stopst, st, NOTHING, st); matchp = NULL; for (;;) { /* next character */ lastc = c; c = (p == m->endp) ? OUT : *p; /* is there an EOL and/or BOL between lastc and c? */ flagch = '\0'; i = 0; if ( (lastc == '\n' && m->g->cflags&REG_NEWLINE) || (lastc == OUT && !(m->eflags&REG_NOTBOL)) ) { flagch = BOL; i = m->g->nbol; } if ( (c == '\n' && m->g->cflags&REG_NEWLINE) || (c == OUT && !(m->eflags&REG_NOTEOL)) ) { flagch = (flagch == BOL) ? BOLEOL : EOL; i += m->g->neol; } if (i != 0) { for (; i > 0; i--) st = step(m->g, startst, stopst, st, flagch, st); SP("sboleol", st, c); } /* how about a word boundary? */ if ( (flagch == BOL || (lastc != OUT && !ISWORD(lastc))) && (c != OUT && ISWORD(c)) ) { flagch = BOW; } if ( (lastc != OUT && ISWORD(lastc)) && (flagch == EOL || (c != OUT && !ISWORD(c))) ) { flagch = EOW; } if (flagch == BOW || flagch == EOW) { st = step(m->g, startst, stopst, st, flagch, st); SP("sboweow", st, c); } /* are we done? 
*/ if (ISSET(st, stopst)) matchp = p; if (EQ(st, empty) || p == stop) break; /* NOTE BREAK OUT */ /* no, we must deal with this character */ ASSIGN(tmp, st); ASSIGN(st, empty); assert(c != OUT); st = step(m->g, startst, stopst, tmp, c, st); SP("saft", st, c); assert(EQ(step(m->g, startst, stopst, st, NOTHING, st), st)); p++; } return(matchp); } /* - step - map set of states reachable before char to set reachable after */ static states step(struct re_guts *g, sopno start, /* start state within strip */ sopno stop, /* state after stop state within strip */ states bef, /* states reachable before */ int ch, /* character or NONCHAR code */ states aft) /* states already known reachable after */ { cset *cs; sop s; sopno pc; onestate here; /* note, macros know this name */ sopno look; int i; for (pc = start, INIT(here, pc); pc != stop; pc++, INC(here)) { s = g->strip[pc]; switch (OP(s)) { case OEND: assert(pc == stop-1); break; case OCHAR: /* only characters can match */ assert(!NONCHAR(ch) || ch != (char)OPND(s)); if (ch == (char)OPND(s)) FWD(aft, bef, 1); break; case OBOL: if (ch == BOL || ch == BOLEOL) FWD(aft, bef, 1); break; case OEOL: if (ch == EOL || ch == BOLEOL) FWD(aft, bef, 1); break; case OBOW: if (ch == BOW) FWD(aft, bef, 1); break; case OEOW: if (ch == EOW) FWD(aft, bef, 1); break; case OANY: if (!NONCHAR(ch)) FWD(aft, bef, 1); break; case OANYOF: cs = &g->sets[OPND(s)]; if (!NONCHAR(ch) && CHIN(cs, ch)) FWD(aft, bef, 1); break; case OBACK_: /* ignored here */ case O_BACK: FWD(aft, aft, 1); break; case OPLUS_: /* forward, this is just an empty */ FWD(aft, aft, 1); break; case O_PLUS: /* both forward and back */ FWD(aft, aft, 1); i = ISSETBACK(aft, OPND(s)); BACK(aft, aft, OPND(s)); if (!i && ISSETBACK(aft, OPND(s))) { /* oho, must reconsider loop body */ pc -= OPND(s) + 1; INIT(here, pc); } break; case OQUEST_: /* two branches, both forward */ FWD(aft, aft, 1); FWD(aft, aft, OPND(s)); break; case O_QUEST: /* just an empty */ FWD(aft, aft, 1); break; case OLPAREN: /* not significant here */ case ORPAREN: FWD(aft, aft, 1); break; case OCH_: /* mark the first two branches */ FWD(aft, aft, 1); assert(OP(g->strip[pc+OPND(s)]) == OOR2); FWD(aft, aft, OPND(s)); break; case OOR1: /* done a branch, find the O_CH */ if (ISSTATEIN(aft, here)) { for (look = 1; OP(s = g->strip[pc+look]) != O_CH; look += OPND(s)) assert(OP(s) == OOR2); FWD(aft, aft, look); } break; case OOR2: /* propagate OCH_'s marking */ FWD(aft, aft, 1); if (OP(g->strip[pc+OPND(s)]) != O_CH) { assert(OP(g->strip[pc+OPND(s)]) == OOR2); FWD(aft, aft, OPND(s)); } break; case O_CH: /* just empty */ FWD(aft, aft, 1); break; default: /* ooooops... */ assert(nope); break; } } return(aft); } #ifdef REDEBUG /* - print - print a set of states */ static void print(struct match *m, char *caption, states st, int ch, FILE *d) { struct re_guts *g = m->g; int i; int first = 1; if (!(m->eflags&REG_TRACE)) return; (void)fprintf(d, "%s", caption); if (ch != '\0') (void)fprintf(d, " %s", pchar(ch)); for (i = 0; i < g->nstates; i++) if (ISSET(st, i)) { (void)fprintf(d, "%s%d", (first) ? 
"\t" : ", ", i); first = 0; } (void)fprintf(d, "\n"); } /* - at - print current situation */ static void at(struct match *m, char *title, char *start, char *stop, sopno startst, sopno stopst) { if (!(m->eflags&REG_TRACE)) return; (void)printf("%s %s-", title, pchar(*start)); (void)printf("%s ", pchar(*stop)); (void)printf("%ld-%ld\n", (long)startst, (long)stopst); } #ifndef PCHARDONE #define PCHARDONE /* never again */ /* - pchar - make a character printable * * Is this identical to regchar() over in debug.c? Well, yes. But a * duplicate here avoids having a debugging-capable regexec.o tied to * a matching debug.o, and this is convenient. It all disappears in * the non-debug compilation anyway, so it doesn't matter much. */ static char * /* -> representation */ pchar(int ch) { static char pbuf[10]; if (isprint(ch) || ch == ' ') (void)snprintf(pbuf, sizeof pbuf, "%c", ch); else (void)snprintf(pbuf, sizeof pbuf, "\\%o", ch); return(pbuf); } #endif #endif #undef matcher #undef fast #undef slow #undef dissect #undef backref #undef step #undef print #undef at #undef match #undef nope
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Statistic.cpp
//===-- Statistic.cpp - Easy way to expose stats information --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the 'Statistic' class, which is designed to be an easy // way to expose various success metrics from passes. These statistics are // printed at the end of a run, when the -stats command line option is enabled // on the command line. // // This is useful for reporting information like the number of instructions // simplified, optimized or removed by various transformations, like this: // // static Statistic NumInstEliminated("GCSE", "Number of instructions killed"); // // Later, in the code: ++NumInstEliminated; // //===----------------------------------------------------------------------===// #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Format.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Mutex.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <cstring> using namespace llvm; // CreateInfoOutputFile - Return a file stream to print our output on. namespace llvm { extern raw_ostream *CreateInfoOutputFile(); } /// -stats - Command line option to cause transformations to emit stats about /// what they did. /// #if 0 // HLSL Change Starts - option pending static cl::opt<bool> Enabled( "stats", cl::desc("Enable statistics output from program (available with Asserts)")); #else static const bool Enabled = false; #endif // HLSL Change Ends namespace { /// StatisticInfo - This class is used in a ManagedStatic so that it is created /// on demand (when the first statistic is bumped) and destroyed only when /// llvm_shutdown is called. We print statistics from the destructor. class StatisticInfo { std::vector<const Statistic*> Stats; friend void llvm::PrintStatistics(); friend void llvm::PrintStatistics(raw_ostream &OS); public: ~StatisticInfo(); void addStatistic(const Statistic *S) { Stats.push_back(S); } }; } static ManagedStatic<StatisticInfo> StatInfo; static ManagedStatic<sys::SmartMutex<true> > StatLock; /// RegisterStatistic - The first time a statistic is bumped, this method is /// called. void Statistic::RegisterStatistic() { // If stats are enabled, inform StatInfo that this statistic should be // printed. sys::SmartScopedLock<true> Writer(*StatLock); if (!Initialized) { if (Enabled) StatInfo->addStatistic(this); TsanHappensBefore(this); sys::MemoryFence(); // Remember we have been registered. TsanIgnoreWritesBegin(); Initialized = true; TsanIgnoreWritesEnd(); } } // Print information when destroyed, iff command line option is specified. StatisticInfo::~StatisticInfo() { llvm::PrintStatistics(); } void llvm::EnableStatistics() { //Enabled.setValue(true); // HLSL Change } bool llvm::AreStatisticsEnabled() { return Enabled; } void llvm::PrintStatistics(raw_ostream &OS) { StatisticInfo &Stats = *StatInfo; // Figure out how long the biggest Value and Name fields are. unsigned MaxNameLen = 0, MaxValLen = 0; for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i) { MaxValLen = std::max(MaxValLen, (unsigned)utostr(Stats.Stats[i]->getValue()).size()); MaxNameLen = std::max(MaxNameLen, (unsigned)std::strlen(Stats.Stats[i]->getName())); } // Sort the fields by name. 
std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(), [](const Statistic *LHS, const Statistic *RHS) { if (int Cmp = std::strcmp(LHS->getName(), RHS->getName())) return Cmp < 0; // Secondary key is the description. return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0; }); // Print out the statistics header... OS << "===" << std::string(73, '-') << "===\n" << " ... Statistics Collected ...\n" << "===" << std::string(73, '-') << "===\n\n"; // Print all of the statistics. for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i) OS << format("%*u %-*s - %s\n", MaxValLen, Stats.Stats[i]->getValue(), MaxNameLen, Stats.Stats[i]->getName(), Stats.Stats[i]->getDesc()); OS << '\n'; // Flush the output stream. OS.flush(); } void llvm::PrintStatistics() { #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS) StatisticInfo &Stats = *StatInfo; // Statistics not enabled? if (Stats.Stats.empty()) return; // Get the stream to write to. raw_ostream &OutStream = *CreateInfoOutputFile(); PrintStatistics(OutStream); delete &OutStream; // Close the file. #else // Check if the -stats option is set instead of checking // !Stats.Stats.empty(). In release builds, Statistics operators // do nothing, so stats are never Registered. if (Enabled) { // Get the stream to write to. raw_ostream &OutStream = *CreateInfoOutputFile(); OutStream << "Statistics are disabled. " << "Build with asserts or with -DLLVM_ENABLE_STATS\n"; OutStream.flush(); delete &OutStream; // Close the file. } #endif }
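// A minimal usage sketch of the machinery above. DEBUG_TYPE and the
// STATISTIC macro come from llvm/ADT/Statistic.h; "mypass" is a hypothetical
// pass name used only for illustration. Note that in this tree `Enabled` is
// hardwired to false (the -stats option is pending), so counters register
// via RegisterStatistic() on their first bump but nothing prints at shutdown.
#include "llvm/ADT/Statistic.h"
#define DEBUG_TYPE "mypass"

STATISTIC(NumErased, "Number of instructions erased");

static void noteErased() {
  ++NumErased; // the first increment triggers Statistic::RegisterStatistic()
}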
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/DataStream.cpp
//===--- llvm/Support/DataStream.cpp - Lazy streamed data -----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements DataStreamer, which fetches bytes of Data from // a stream source. It provides support for streaming (lazy reading) of // bitcode. An example implementation of streaming from a file or stdin // is included. // //===----------------------------------------------------------------------===// #include "llvm/Support/DataStream.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Program.h" #include <string> #include <system_error> #if !defined(_MSC_VER) && !defined(__MINGW32__) #include <unistd.h> #else #include <io.h> #endif using namespace llvm; #define DEBUG_TYPE "Data-stream" // Interface goals: // * StreamingMemoryObject doesn't care about complexities like using // threads/async callbacks to actually overlap download+compile // * Don't want to duplicate Data in memory // * Don't need to know total Data len in advance // Non-goals: // StreamingMemoryObject already has random access so this interface only does // in-order streaming (no arbitrary seeking, else we'd have to buffer all the // Data here in addition to MemoryObject). This also means that if we want // to be able to free Data, BitstreamBytes/BitcodeReader will implement it STATISTIC(NumStreamFetches, "Number of calls to Data stream fetch"); namespace llvm { DataStreamer::~DataStreamer() {} } namespace { // Very simple stream backed by a file. Mostly useful for stdin and debugging; // actual file access is probably still best done with mmap. class DataFileStreamer : public DataStreamer { int Fd; public: DataFileStreamer() : Fd(0) {} virtual ~DataFileStreamer() { llvm::sys::fs::msf_close(Fd); // HLSL Change - use msf_close } size_t GetBytes(unsigned char *buf, size_t len) override { NumStreamFetches++; return llvm::sys::fs::msf_read(Fd, buf, len); } std::error_code OpenFile(const std::string &Filename) { if (Filename == "-") { Fd = 0; sys::ChangeStdinToBinary(); return std::error_code(); } return sys::fs::openFileForRead(Filename, Fd); } }; } std::unique_ptr<DataStreamer> llvm::getDataFileStreamer(const std::string &Filename, std::string *StrError) { std::unique_ptr<DataFileStreamer> s = make_unique<DataFileStreamer>(); if (std::error_code e = s->OpenFile(Filename)) { *StrError = std::string("Could not open ") + Filename + ": " + e.message() + "\n"; return nullptr; } return s; // HLSL Change - Fix redundant move warning. }
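// A minimal usage sketch of the streamer above: "-" maps to stdin (switched
// to binary mode), anything else is opened for read. GetBytes returns the
// number of bytes actually fetched, and a short or zero read is how callers
// detect end of stream; there is no seeking, matching the in-order goals
// stated in the file header.
#include "llvm/Support/DataStream.h"
#include <cstdio>
#include <memory>
#include <string>

static size_t countBytes(const std::string &Path) {
  std::string Err;
  std::unique_ptr<llvm::DataStreamer> S =
      llvm::getDataFileStreamer(Path, &Err);
  if (!S) {
    fprintf(stderr, "%s", Err.c_str());
    return 0;
  }
  unsigned char Buf[4096];
  size_t Total = 0, N;
  while ((N = S->GetBytes(Buf, sizeof(Buf))) != 0) // in-order, no seeking
    Total += N;
  return Total;
}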
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/MathExtras.cpp
//===-- MathExtras.cpp - Implement the MathExtras header --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the MathExtras.h header // //===----------------------------------------------------------------------===// #include "llvm/Support/MathExtras.h" #ifdef _MSC_VER #include <limits> #else #include <math.h> #endif namespace llvm { #if defined(_MSC_VER) // Visual Studio defines the HUGE_VAL class of macros using purposeful // constant arithmetic overflow, which it then warns on when encountered. const float huge_valf = std::numeric_limits<float>::infinity(); #else const float huge_valf = HUGE_VALF; #endif }
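// A minimal sketch of what the MSVC special case above guarantees: both
// spellings denote the same float infinity, but std::numeric_limits avoids
// the deliberate-overflow expression behind HUGE_VALF that MSVC warns about.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cmath>
#include <limits>

static void checkHugeValf() {
  assert(std::isinf(llvm::huge_valf));
  assert(llvm::huge_valf == std::numeric_limits<float>::infinity());
}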
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/DataExtractor.cpp
//===-- DataExtractor.cpp -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Support/DataExtractor.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Host.h" #include "llvm/Support/SwapByteOrder.h" using namespace llvm; template <typename T> static T getU(uint32_t *offset_ptr, const DataExtractor *de, bool isLittleEndian, const char *Data) { T val = 0; uint32_t offset = *offset_ptr; if (de->isValidOffsetForDataOfSize(offset, sizeof(val))) { std::memcpy(&val, &Data[offset], sizeof(val)); if (sys::IsLittleEndianHost != isLittleEndian) sys::swapByteOrder(val); // Advance the offset *offset_ptr += sizeof(val); } return val; } template <typename T> static T *getUs(uint32_t *offset_ptr, T *dst, uint32_t count, const DataExtractor *de, bool isLittleEndian, const char *Data){ uint32_t offset = *offset_ptr; if (count > 0 && de->isValidOffsetForDataOfSize(offset, sizeof(*dst)*count)) { for (T *value_ptr = dst, *end = dst + count; value_ptr != end; ++value_ptr, offset += sizeof(*dst)) *value_ptr = getU<T>(offset_ptr, de, isLittleEndian, Data); // Advance the offset *offset_ptr = offset; // Return a non-NULL pointer to the converted data as an indicator of // success return dst; } return nullptr; } uint8_t DataExtractor::getU8(uint32_t *offset_ptr) const { return getU<uint8_t>(offset_ptr, this, IsLittleEndian, Data.data()); } uint8_t * DataExtractor::getU8(uint32_t *offset_ptr, uint8_t *dst, uint32_t count) const { return getUs<uint8_t>(offset_ptr, dst, count, this, IsLittleEndian, Data.data()); } uint16_t DataExtractor::getU16(uint32_t *offset_ptr) const { return getU<uint16_t>(offset_ptr, this, IsLittleEndian, Data.data()); } uint16_t *DataExtractor::getU16(uint32_t *offset_ptr, uint16_t *dst, uint32_t count) const { return getUs<uint16_t>(offset_ptr, dst, count, this, IsLittleEndian, Data.data()); } uint32_t DataExtractor::getU32(uint32_t *offset_ptr) const { return getU<uint32_t>(offset_ptr, this, IsLittleEndian, Data.data()); } uint32_t *DataExtractor::getU32(uint32_t *offset_ptr, uint32_t *dst, uint32_t count) const { return getUs<uint32_t>(offset_ptr, dst, count, this, IsLittleEndian, Data.data()); } uint64_t DataExtractor::getU64(uint32_t *offset_ptr) const { return getU<uint64_t>(offset_ptr, this, IsLittleEndian, Data.data()); } uint64_t *DataExtractor::getU64(uint32_t *offset_ptr, uint64_t *dst, uint32_t count) const { return getUs<uint64_t>(offset_ptr, dst, count, this, IsLittleEndian, Data.data()); } uint64_t DataExtractor::getUnsigned(uint32_t *offset_ptr, uint32_t byte_size) const { switch (byte_size) { case 1: return getU8(offset_ptr); case 2: return getU16(offset_ptr); case 4: return getU32(offset_ptr); case 8: return getU64(offset_ptr); } llvm_unreachable("getUnsigned unhandled case!"); } int64_t DataExtractor::getSigned(uint32_t *offset_ptr, uint32_t byte_size) const { switch (byte_size) { case 1: return (int8_t)getU8(offset_ptr); case 2: return (int16_t)getU16(offset_ptr); case 4: return (int32_t)getU32(offset_ptr); case 8: return (int64_t)getU64(offset_ptr); } llvm_unreachable("getSigned unhandled case!"); } const char *DataExtractor::getCStr(uint32_t *offset_ptr) const { uint32_t offset = *offset_ptr; StringRef::size_type pos = Data.find('\0', offset); if (pos != StringRef::npos) { *offset_ptr = pos + 1; return 
Data.data() + offset; } return nullptr; } uint64_t DataExtractor::getULEB128(uint32_t *offset_ptr) const { uint64_t result = 0; if (Data.empty()) return 0; unsigned shift = 0; uint32_t offset = *offset_ptr; uint8_t byte = 0; while (isValidOffset(offset)) { byte = Data[offset++]; result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; } *offset_ptr = offset; return result; } int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const { int64_t result = 0; if (Data.empty()) return 0; unsigned shift = 0; uint32_t offset = *offset_ptr; uint8_t byte = 0; while (isValidOffset(offset)) { byte = Data[offset++]; result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; } // Sign bit of byte is 2nd high order bit (0x40) if (shift < 64 && (byte & 0x40)) result |= -(1ULL << shift); *offset_ptr = offset; return result; }
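// A minimal usage sketch of the cursor-style API above: each getter reads at
// *offset_ptr and advances it only when the read stays in bounds. The
// trailing three bytes are the classic ULEB128 encoding of 624485 from the
// DWARF specification.
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataExtractor.h"
#include <cassert>
#include <cstdint>

static void decodeExample() {
  const char Bytes[] = {'\x34', '\x12', '\xe5', '\x8e', '\x26'};
  llvm::DataExtractor DE(llvm::StringRef(Bytes, sizeof(Bytes)),
                         /*IsLittleEndian=*/true, /*AddressSize=*/4);
  uint32_t Off = 0;
  assert(DE.getU16(&Off) == 0x1234);      // little-endian; Off advances to 2
  assert(DE.getULEB128(&Off) == 624485u); // Off advances to 5
}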
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/MSFileSystemBasic.cpp
//===- llvm/Support/Windows/MSFileSystemBasic.cpp DXCompiler Impl -*- C++ -*-===// /////////////////////////////////////////////////////////////////////////////// // // // MSFileSystemBasic.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // This file implements the DXCompiler specific implementation of the Path // API.// // // /////////////////////////////////////////////////////////////////////////////// #ifdef _WIN32 #include "dxc/Support/WinIncludes.h" #include <assert.h> #include <d3dcommon.h> #include <errno.h> #include <fcntl.h> #include <io.h> #include <new> #include <stdint.h> #include <sys/stat.h> #include <sys/types.h> #include <unordered_map> #include "dxc/Support/Global.h" #include "llvm/Support/MSFileSystem.h" #include "dxc/dxcapi.internal.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Externally visible functions. /// <summary>Creates an implementation based on IDxcSystemAccess.</summary> HRESULT CreateMSFileSystemForIface(IUnknown *pService, ::llvm::sys::fs::MSFileSystem **pResult) throw(); /// <summary>Creates an implementation with no access to system /// resources.</summary> HRESULT CreateMSFileSystemBlocked(::llvm::sys::fs::MSFileSystem **pResult) throw(); /////////////////////////////////////////////////////////////////////////////////////////////////// // Helper functions. static DWORD WIN32_FROM_HRESULT(HRESULT hr) { if (SUCCEEDED(hr)) return ERROR_SUCCESS; if ((HRESULT)(hr & 0xFFFF0000) == MAKE_HRESULT(SEVERITY_ERROR, FACILITY_WIN32, 0)) { // Could have come from many values, but we choose this one return HRESULT_CODE(hr); } if (hr == E_OUTOFMEMORY) return ERROR_OUTOFMEMORY; if (hr == E_NOTIMPL) return ERROR_CALL_NOT_IMPLEMENTED; return ERROR_FUNCTION_FAILED; } static HRESULT CopyStatStg(const STATSTG *statStg, LPWIN32_FIND_DATAW lpFindFileData) { HRESULT hr = S_OK; lpFindFileData->dwFileAttributes = FILE_ATTRIBUTE_NORMAL; lpFindFileData->ftCreationTime = statStg->ctime; lpFindFileData->ftLastAccessTime = statStg->atime; lpFindFileData->ftLastWriteTime = statStg->mtime; lpFindFileData->nFileSizeLow = statStg->cbSize.LowPart; lpFindFileData->nFileSizeHigh = statStg->cbSize.HighPart; if (statStg->pwcsName != nullptr) { IFC(StringCchCopyW(lpFindFileData->cFileName, _countof(lpFindFileData->cFileName), statStg->pwcsName)); } Cleanup: return hr; } static void ClearStatStg(STATSTG *statStg) { DXASSERT_NOMSG(statStg != nullptr); if (statStg->pwcsName != nullptr) { CoTaskMemFree(statStg->pwcsName); statStg->pwcsName = nullptr; } } /////////////////////////////////////////////////////////////////////////////////////////////////// // IDxcSystemAccess-based MSFileSystem implementation. struct MSFileSystemHandle { enum MSFileSystemHandleKind { MSFileSystemHandleKind_FindHandle, MSFileSystemHandleKind_FileHandle, MSFileSystemHandleKind_FileMappingHandle }; MSFileSystemHandleKind kind; CComPtr<IUnknown> storage; // For a file handle, the stream or directory handle. // For a find handle, the IEnumSTATSTG associated. CComPtr<IStream> stream; // For a file or console file handle, the stream interface. int fd; // For a file handle, its file descriptor. 
explicit MSFileSystemHandle(int knownFD) : kind(MSFileSystemHandleKind_FileHandle), fd(knownFD) {} explicit MSFileSystemHandle(IUnknown *pMapping) : kind(MSFileSystemHandleKind_FileMappingHandle), storage(pMapping), fd(0) {} MSFileSystemHandle(IUnknown *pStorage, IStream *pStream) : kind(MSFileSystemHandleKind_FileHandle), storage(pStorage), stream(pStream), fd(0) {} explicit MSFileSystemHandle(IEnumSTATSTG *pEnumSTATG) : kind(MSFileSystemHandleKind_FindHandle), storage(pEnumSTATG) {} MSFileSystemHandle(MSFileSystemHandle &&other) { kind = other.kind; storage.p = other.storage.Detach(); stream.p = other.stream.Detach(); } HANDLE GetHandle() const { return (HANDLE)this; } IEnumSTATSTG *GetEnumStatStg() { DXASSERT(kind == MSFileSystemHandleKind_FindHandle, "otherwise caller didn't check"); return (IEnumSTATSTG *)storage.p; } }; namespace llvm { namespace sys { namespace fs { class MSFileSystemForIface : public MSFileSystem { private: CComPtr<IDxcSystemAccess> m_system; typedef std::unordered_multimap<LPCVOID, ID3D10Blob *> TViewMap; TViewMap m_mappingViews; MSFileSystemHandle m_knownHandle0; MSFileSystemHandle m_knownHandle1; MSFileSystemHandle m_knownHandle2; HRESULT AddFindHandle(IEnumSTATSTG *enumStatStg, HANDLE *pResult) throw(); HRESULT AddFileHandle(IUnknown *storage, IStream *stream, HANDLE *pResult) throw(); HRESULT AddMappingHandle(IUnknown *mapping, HANDLE *pResult) throw(); HRESULT AddMappingView(ID3D10Blob *blob) throw(); HRESULT EnsureFDAvailable(int fd); HANDLE GetHandleForFD(int fd) throw(); void GetFindHandle(HANDLE findHandle, IEnumSTATSTG **enumStatStg) throw(); int GetHandleFD(HANDLE fileHandle) throw(); void GetHandleMapping(HANDLE fileHandle, IUnknown **pResult) throw(); void GetHandleStorage(HANDLE fileHandle, IUnknown **pResult) throw(); void GetHandleStream(HANDLE fileHandle, IStream **pResult) throw(); void CloseInternalHandle(HANDLE findHandle) throw(); void RemoveMappingView(LPCVOID address) throw(); public: MSFileSystemForIface(IDxcSystemAccess *access); virtual BOOL FindNextFileW(HANDLE hFindFile, LPWIN32_FIND_DATAW lpFindFileData) throw() override; virtual HANDLE FindFirstFileW(LPCWSTR lpFileName, LPWIN32_FIND_DATAW lpFindFileData) throw() override; virtual void FindClose(HANDLE findHandle) throw() override; virtual HANDLE CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes) throw() override; virtual BOOL SetFileTime(HANDLE hFile, const FILETIME *lpCreationTime, const FILETIME *lpLastAccessTime, const FILETIME *lpLastWriteTime) throw() override; virtual BOOL GetFileInformationByHandle( HANDLE hFile, LPBY_HANDLE_FILE_INFORMATION lpFileInformation) throw() override; virtual DWORD GetFileType(HANDLE hFile) throw() override; virtual BOOL CreateHardLinkW(LPCWSTR lpFileName, LPCWSTR lpExistingFileName) throw() override; virtual BOOL MoveFileExW(LPCWSTR lpExistingFileName, LPCWSTR lpNewFileName, DWORD dwFlags) throw() override; virtual DWORD GetFileAttributesW(LPCWSTR lpFileName) throw() override; virtual BOOL CloseHandle(HANDLE hObject) throw() override; virtual BOOL DeleteFileW(LPCWSTR lpFileName) throw() override; virtual BOOL RemoveDirectoryW(LPCWSTR lpFileName) throw() override; virtual BOOL CreateDirectoryW(LPCWSTR lpPathName) throw() override; virtual DWORD GetCurrentDirectoryW(DWORD nBufferLength, LPWSTR lpBuffer) throw() override; virtual DWORD GetMainModuleFileNameW(LPWSTR lpFilename, DWORD nSize) throw() override; virtual DWORD GetTempPathW(DWORD nBufferLength, LPWSTR lpBuffer) 
throw() override; virtual BOOLEAN CreateSymbolicLinkW(LPCWSTR lpSymlinkFileName, LPCWSTR lpTargetFileName, DWORD dwFlags) throw() override; virtual bool SupportsCreateSymbolicLink() throw() override; virtual BOOL ReadFile(HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead) throw() override; virtual HANDLE CreateFileMappingW(HANDLE hFile, DWORD flProtect, DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow) throw() override; virtual LPVOID MapViewOfFile(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap) throw() override; virtual BOOL UnmapViewOfFile(LPCVOID lpBaseAddress) throw() override; // Console APIs. virtual bool FileDescriptorIsDisplayed(int fd) throw() override; virtual unsigned GetColumnCount(DWORD nStdHandle) throw() override; virtual unsigned GetConsoleOutputTextAttributes() throw() override; virtual void SetConsoleOutputTextAttributes(unsigned attributes) throw() override; virtual void ResetConsoleOutputTextAttributes() throw() override; // CRT APIs. virtual int open_osfhandle(intptr_t osfhandle, int flags) throw() override; virtual intptr_t get_osfhandle(int fd) throw() override; virtual int close(int fd) throw() override; virtual long lseek(int fd, long offset, int origin) throw() override; virtual int setmode(int fd, int mode) throw() override; virtual errno_t resize_file(LPCWSTR path, uint64_t size) throw() override; virtual int Read(int fd, void *buffer, unsigned int count) throw() override; virtual int Write(int fd, const void *buffer, unsigned int count) throw() override; #ifndef _WIN32 virtual int Open(const char *lpFileName, int flags, mode_t mode) throw() override; virtual int Stat(const char *lpFileName, struct stat *Status) throw() override; virtual int Fstat(int FD, struct stat *Status) throw() override; #endif }; MSFileSystemForIface::MSFileSystemForIface(IDxcSystemAccess *systemAccess) : m_system(systemAccess), m_knownHandle0(0), m_knownHandle1(1), m_knownHandle2(2) {} HRESULT MSFileSystemForIface::AddMappingHandle(IUnknown *mapping, HANDLE *pResult) throw() { DXASSERT_NOMSG(mapping != nullptr); DXASSERT_NOMSG(pResult != nullptr); HRESULT hr = S_OK; MSFileSystemHandle *handle = nullptr; *pResult = INVALID_HANDLE_VALUE; handle = new (std::nothrow) MSFileSystemHandle(mapping); IFCOOM(handle); *pResult = handle->GetHandle(); Cleanup: return hr; } HRESULT MSFileSystemForIface::AddMappingView(ID3D10Blob *blob) throw() { DXASSERT_NOMSG(blob != nullptr); LPVOID address = blob->GetBufferPointer(); try { m_mappingViews.insert(std::pair<LPVOID, ID3D10Blob *>(address, blob)); } catch (std::bad_alloc &) { return E_OUTOFMEMORY; } blob->AddRef(); return S_OK; } HRESULT MSFileSystemForIface::AddFindHandle(IEnumSTATSTG *enumStatStg, HANDLE *pResult) throw() { DXASSERT_NOMSG(enumStatStg != nullptr); DXASSERT_NOMSG(pResult != nullptr); HRESULT hr = S_OK; MSFileSystemHandle *handle = nullptr; *pResult = INVALID_HANDLE_VALUE; handle = new (std::nothrow) MSFileSystemHandle(enumStatStg); IFCOOM(handle); *pResult = handle->GetHandle(); Cleanup: return hr; } HRESULT MSFileSystemForIface::AddFileHandle(IUnknown *storage, IStream *stream, HANDLE *pResult) throw() { DXASSERT_NOMSG(storage != nullptr); DXASSERT_NOMSG(pResult != nullptr); HRESULT hr = S_OK; MSFileSystemHandle *handle = nullptr; *pResult = INVALID_HANDLE_VALUE; handle = new (std::nothrow) MSFileSystemHandle(storage, stream); IFCOOM(handle); *pResult = handle->GetHandle(); Cleanup: return hr; } void 
MSFileSystemForIface::CloseInternalHandle(HANDLE handle) throw() { DXASSERT_NOMSG(handle != nullptr); DXASSERT_NOMSG(handle != INVALID_HANDLE_VALUE); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(handle); if (fsHandle == &m_knownHandle0 || fsHandle == &m_knownHandle1 || fsHandle == &m_knownHandle2) { fsHandle->stream.Release(); fsHandle->storage.Release(); } else { delete fsHandle; } } void MSFileSystemForIface::RemoveMappingView(LPCVOID address) throw() { TViewMap::iterator i = m_mappingViews.find(address); DXASSERT(i != m_mappingViews.end(), "otherwise pointer to view isn't in map"); DXASSERT(i->second != nullptr, "otherwise blob is null and should not have been added"); i->second->Release(); m_mappingViews.erase(i); } void MSFileSystemForIface::GetFindHandle(HANDLE findHandle, IEnumSTATSTG **enumStatStg) throw() { DXASSERT_NOMSG(findHandle != nullptr); DXASSERT_NOMSG(enumStatStg != nullptr); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(findHandle); DXASSERT(fsHandle->kind == MSFileSystemHandle::MSFileSystemHandleKind_FindHandle, "otherwise caller is passing wrong handle to API"); *enumStatStg = fsHandle->GetEnumStatStg(); DXASSERT(*enumStatStg != nullptr, "otherwise it should not have been added to handle entry"); (*enumStatStg)->AddRef(); } int MSFileSystemForIface::GetHandleFD(HANDLE fileHandle) throw() { DXASSERT_NOMSG(fileHandle != nullptr); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(fileHandle); DXASSERT(fsHandle->kind == MSFileSystemHandle::MSFileSystemHandleKind_FileHandle, "otherwise caller is passing wrong handle to API"); return fsHandle->fd; } void MSFileSystemForIface::GetHandleMapping(HANDLE mapping, IUnknown **pResult) throw() { DXASSERT_NOMSG(mapping != nullptr); DXASSERT_NOMSG(pResult != nullptr); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(mapping); DXASSERT(fsHandle->kind == MSFileSystemHandle::MSFileSystemHandleKind_FileMappingHandle, "otherwise caller is passing wrong handle to API"); *pResult = fsHandle->storage.p; DXASSERT(*pResult != nullptr, "otherwise it should not be requested through GetHandleMapping"); (*pResult)->AddRef(); } void MSFileSystemForIface::GetHandleStorage(HANDLE fileHandle, IUnknown **pResult) throw() { DXASSERT_NOMSG(fileHandle != nullptr); DXASSERT_NOMSG(pResult != nullptr); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(fileHandle); DXASSERT(fsHandle->kind == MSFileSystemHandle::MSFileSystemHandleKind_FileHandle, "otherwise caller is passing wrong handle to API"); *pResult = fsHandle->storage.p; DXASSERT(*pResult != nullptr, "otherwise it should not be requested through GetHandleStorage"); (*pResult)->AddRef(); } void MSFileSystemForIface::GetHandleStream(HANDLE fileHandle, IStream **pResult) throw() { DXASSERT_NOMSG(fileHandle != nullptr); DXASSERT_NOMSG(pResult != nullptr); MSFileSystemHandle *fsHandle = reinterpret_cast<MSFileSystemHandle *>(fileHandle); DXASSERT(fsHandle->kind == MSFileSystemHandle::MSFileSystemHandleKind_FileHandle, "otherwise caller is passing wrong handle to API"); *pResult = fsHandle->stream.p; DXASSERT(*pResult != nullptr, "otherwise it should not be requested through GetHandleStream"); (*pResult)->AddRef(); } HANDLE MSFileSystemForIface::FindFirstFileW( LPCWSTR lpFileName, LPWIN32_FIND_DATAW lpFindFileData) throw() { HRESULT hr = S_OK; CComPtr<IEnumSTATSTG> enumStatStg; HANDLE resultValue = INVALID_HANDLE_VALUE; STATSTG elt; ULONG fetched; ZeroMemory(&elt, sizeof(elt)); 
ZeroMemory(lpFindFileData, sizeof(*lpFindFileData)); fetched = 0; IFC(m_system->EnumFiles(lpFileName, &enumStatStg)); IFC(enumStatStg->Next(1, &elt, &fetched)); if (fetched == 0) { IFC(HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)); } else { IFC(CopyStatStg(&elt, lpFindFileData)); IFC(AddFindHandle(enumStatStg, &resultValue)); } Cleanup: ClearStatStg(&elt); if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return INVALID_HANDLE_VALUE; } DXASSERT(resultValue != INVALID_HANDLE_VALUE, "otherwise AddFindHandle failed to return a valid handle"); return resultValue; } BOOL MSFileSystemForIface::FindNextFileW( HANDLE hFindFile, LPWIN32_FIND_DATAW lpFindFileData) throw() { HRESULT hr = S_OK; CComPtr<IEnumSTATSTG> enumStatStg; STATSTG elt; ULONG fetched; ZeroMemory(&elt, sizeof(elt)); ZeroMemory(lpFindFileData, sizeof(*lpFindFileData)); fetched = 0; GetFindHandle(hFindFile, &enumStatStg); IFC(enumStatStg->Next(1, &elt, &fetched)); if (fetched == 0) { IFC(HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)); } else { IFC(CopyStatStg(&elt, lpFindFileData)); } Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return FALSE; } return TRUE; } void MSFileSystemForIface::FindClose(HANDLE findHandle) throw() { CloseInternalHandle(findHandle); } HANDLE MSFileSystemForIface::CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes) throw() { HRESULT hr = S_OK; CComPtr<IUnknown> storage; CComPtr<IStream> stream; HANDLE resultHandle = INVALID_HANDLE_VALUE; IFC(m_system->OpenStorage(lpFileName, dwDesiredAccess, dwShareMode, dwCreationDisposition, dwFlagsAndAttributes, &storage)); IFC(storage.QueryInterface(&stream)); IFC(AddFileHandle(storage, stream, &resultHandle)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return INVALID_HANDLE_VALUE; } return resultHandle; } BOOL MSFileSystemForIface::SetFileTime( HANDLE hFile, const FILETIME *lpCreationTime, const FILETIME *lpLastAccessTime, const FILETIME *lpLastWriteTime) throw() { HRESULT hr = S_OK; CComPtr<IUnknown> storage; GetHandleStorage(hFile, &storage); IFC(m_system->SetStorageTime(storage, lpCreationTime, lpLastAccessTime, lpLastWriteTime)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return FALSE; } return TRUE; } BOOL MSFileSystemForIface::GetFileInformationByHandle( HANDLE hFile, LPBY_HANDLE_FILE_INFORMATION lpFileInformation) throw() { HRESULT hr = S_OK; CComPtr<IUnknown> storage; GetHandleStorage(hFile, &storage); IFC(m_system->GetFileInformationForStorage(storage, lpFileInformation)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return FALSE; } return TRUE; } DWORD MSFileSystemForIface::GetFileType(HANDLE hFile) throw() { HRESULT hr = S_OK; CComPtr<IUnknown> storage; DWORD fileType; GetHandleStorage(hFile, &storage); IFC(m_system->GetFileTypeForStorage(storage, &fileType)); if (fileType == FILE_TYPE_UNKNOWN) { SetLastError(NO_ERROR); } Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); fileType = FILE_TYPE_UNKNOWN; } return fileType; } BOOL MSFileSystemForIface::CreateHardLinkW(LPCWSTR lpFileName, LPCWSTR lpExistingFileName) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } BOOL MSFileSystemForIface::MoveFileExW(LPCWSTR lpExistingFileName, LPCWSTR lpNewFileName, DWORD dwFlags) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } DWORD MSFileSystemForIface::GetFileAttributesW(LPCWSTR lpFileName) throw() { HRESULT hr = S_OK; DWORD attributes; 
IFC(m_system->GetFileAttributesForStorage(lpFileName, &attributes)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); attributes = INVALID_FILE_ATTRIBUTES; } return attributes; } BOOL MSFileSystemForIface::CloseHandle(HANDLE hObject) throw() { this->CloseInternalHandle(hObject); return TRUE; } BOOL MSFileSystemForIface::DeleteFileW(LPCWSTR lpFileName) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } BOOL MSFileSystemForIface::RemoveDirectoryW(LPCWSTR lpFileName) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } BOOL MSFileSystemForIface::CreateDirectoryW(LPCWSTR lpPathName) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } DWORD MSFileSystemForIface::GetCurrentDirectoryW(DWORD nBufferLength, LPWSTR lpBuffer) throw() { DWORD written = 0; HRESULT hr = S_OK; IFC(m_system->GetCurrentDirectoryForStorage(nBufferLength, lpBuffer, &written)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return 0; } return written; } DWORD MSFileSystemForIface::GetMainModuleFileNameW(LPWSTR lpFilename, DWORD nSize) throw() { DWORD written = 0; HRESULT hr = S_OK; IFC(m_system->GetMainModuleFileNameW(nSize, lpFilename, &written)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return 0; } return written; } DWORD MSFileSystemForIface::GetTempPathW(DWORD nBufferLength, LPWSTR lpBuffer) throw() { DWORD written = 0; HRESULT hr = S_OK; IFC(m_system->GetTempStoragePath(nBufferLength, lpBuffer, &written)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return 0; } return written; } BOOLEAN MSFileSystemForIface::CreateSymbolicLinkW(LPCWSTR lpSymlinkFileName, LPCWSTR lpTargetFileName, DWORD dwFlags) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } bool MSFileSystemForIface::SupportsCreateSymbolicLink() throw() { return false; } BOOL MSFileSystemForIface::ReadFile(HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead) throw() { HRESULT hr = S_OK; CComPtr<IStream> stream; GetHandleStream(hFile, &stream); ULONG cbRead; IFC(stream->Read(lpBuffer, nNumberOfBytesToRead, &cbRead)); if (lpNumberOfBytesRead != nullptr) { *lpNumberOfBytesRead = cbRead; } Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return FALSE; } return TRUE; } HANDLE MSFileSystemForIface::CreateFileMappingW(HANDLE hFile, DWORD flProtect, DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow) throw() { HRESULT hr = S_OK; HANDLE result = INVALID_HANDLE_VALUE; CComPtr<IUnknown> storage; CComPtr<IUnknown> mapping; GetHandleStorage(hFile, &storage); IFC(m_system->CreateStorageMapping(storage, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow, &mapping)); IFC(AddMappingHandle(mapping, &result)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return INVALID_HANDLE_VALUE; } return result; } LPVOID MSFileSystemForIface::MapViewOfFile( HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap) throw() { HRESULT hr = S_OK; CComPtr<IUnknown> mapping; CComPtr<ID3D10Blob> blob; GetHandleMapping(hFileMappingObject, &mapping); IFC(m_system->MapViewOfFile(mapping, dwDesiredAccess, dwFileOffsetHigh, dwFileOffsetLow, dwNumberOfBytesToMap, &blob)); IFC(AddMappingView(blob)); Cleanup: if (FAILED(hr)) { SetLastError(WIN32_FROM_HRESULT(hr)); return INVALID_HANDLE_VALUE; } return blob->GetBufferPointer(); } BOOL MSFileSystemForIface::UnmapViewOfFile(LPCVOID lpBaseAddress) throw() { RemoveMappingView(lpBaseAddress); return 
TRUE; } bool MSFileSystemForIface::FileDescriptorIsDisplayed(int fd) throw() { return false; } unsigned MSFileSystemForIface::GetColumnCount(DWORD nStdHandle) throw() { return 0; } unsigned MSFileSystemForIface::GetConsoleOutputTextAttributes() throw() { return 0; } void MSFileSystemForIface::SetConsoleOutputTextAttributes( unsigned attributes) throw() { return; } void MSFileSystemForIface::ResetConsoleOutputTextAttributes() throw() {} int MSFileSystemForIface::open_osfhandle(intptr_t osfhandle, int flags) throw() { return GetHandleFD((HANDLE)osfhandle); } HRESULT MSFileSystemForIface::EnsureFDAvailable(int fd) { MSFileSystemHandle *ptr; switch (fd) { case 0: ptr = &m_knownHandle0; break; case 1: ptr = &m_knownHandle1; break; case 2: ptr = &m_knownHandle2; break; default: return S_OK; } HRESULT hr = S_OK; if (ptr->storage == nullptr) { CComPtr<IUnknown> storage; CComPtr<IStream> stream; IFC(m_system->OpenStdStorage(fd, &storage)); IFC(storage.QueryInterface(&stream)); ptr->storage = storage; ptr->stream = stream; } DXASSERT(ptr->storage != nullptr, "otherwise we should have failed to initialize"); DXASSERT(ptr->stream != nullptr, "otherwise we should have failed to initialize - input/output/error " "should support streams"); Cleanup: return hr; } HANDLE MSFileSystemForIface::GetHandleForFD(int fd) throw() { MSFileSystemHandle *ptr; switch (fd) { case 0: ptr = &m_knownHandle0; break; case 1: ptr = &m_knownHandle1; break; case 2: ptr = &m_knownHandle2; break; default: ptr = (MSFileSystemHandle *)(uintptr_t)fd; break; } return ptr->GetHandle(); } intptr_t MSFileSystemForIface::get_osfhandle(int fd) throw() { if (FAILED(EnsureFDAvailable(fd))) { errno = EBADF; return -1; } return (intptr_t)GetHandleForFD(fd); } int MSFileSystemForIface::close(int fd) throw() { HANDLE h = GetHandleForFD(fd); this->CloseInternalHandle(h); return 0; } long MSFileSystemForIface::lseek(int fd, long offset, int origin) throw() { HRESULT hr = S_OK; CComPtr<IStream> stream; LARGE_INTEGER li; ULARGE_INTEGER uli; if (FAILED(EnsureFDAvailable(fd))) { errno = EBADF; return -1; } GetHandleStream(GetHandleForFD(fd), &stream); li.HighPart = 0; li.LowPart = offset; IFC(stream->Seek(li, origin, &uli)); Cleanup: if (FAILED(hr)) { errno = EINVAL; return -1; } if (uli.HighPart > 0) { errno = EOVERFLOW; return -1; } return uli.LowPart; } int MSFileSystemForIface::setmode(int fd, int mode) throw() { return 0; } errno_t MSFileSystemForIface::resize_file(LPCWSTR path, uint64_t size) throw() { return EBADF; } int MSFileSystemForIface::Read(int fd, void *buffer, unsigned int count) throw() { HRESULT hr = S_OK; CComPtr<IStream> stream; ULONG cbRead = 0; if (FAILED(EnsureFDAvailable(fd))) { errno = EBADF; return -1; } GetHandleStream(GetHandleForFD(fd), &stream); IFC(stream->Read(buffer, count, &cbRead)); Cleanup: if (FAILED(hr)) { errno = EINVAL; return -1; } return (int)cbRead; } int MSFileSystemForIface::Write(int fd, const void *buffer, unsigned int count) throw() { HRESULT hr = S_OK; CComPtr<IStream> stream; ULONG cbWritten = 0; if (FAILED(EnsureFDAvailable(fd))) { errno = EBADF; return -1; } GetHandleStream(GetHandleForFD(fd), &stream); IFC(stream->Write(buffer, count, &cbWritten)); Cleanup: if (FAILED(hr)) { errno = EINVAL; return -1; } return (int)cbWritten; } #ifndef _WIN32 int MSFileSystemForIface::Open(const char *lpFileName, int flags, mode_t mode) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } int MSFileSystemForIface::Stat(const char *lpFileName, struct stat *Status) throw() { 
SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } int MSFileSystemForIface::Fstat(int FD, struct stat *Status) throw() { SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } #endif } // end namespace fs } // end namespace sys } // end namespace llvm /////////////////////////////////////////////////////////////////////////////////////////////////// // Blocked MSFileSystem implementation. #ifndef NDEBUG static void MSFileSystemBlockedCalled() { DebugBreak(); } #else static void MSFileSystemBlockedCalled() {} #endif static BOOL MSFileSystemBlockedErrWin32() { MSFileSystemBlockedCalled(); SetLastError(ERROR_FUNCTION_NOT_CALLED); return FALSE; } static HANDLE MSFileSystemBlockedHandle() { MSFileSystemBlockedCalled(); SetLastError(ERROR_FUNCTION_NOT_CALLED); return INVALID_HANDLE_VALUE; } static int MSFileSystemBlockedErrno() { MSFileSystemBlockedCalled(); errno = EBADF; return -1; } static int MSFileSystemBlockedErrnoT() { MSFileSystemBlockedCalled(); return EBADF; } namespace llvm { namespace sys { namespace fs { class MSFileSystemBlocked : public MSFileSystem { private: public: MSFileSystemBlocked(); virtual BOOL FindNextFileW(HANDLE, LPWIN32_FIND_DATAW) throw() override { return MSFileSystemBlockedErrWin32(); } virtual HANDLE FindFirstFileW(LPCWSTR lpFileName, LPWIN32_FIND_DATAW lpFindFileData) throw() override { return MSFileSystemBlockedHandle(); } virtual void FindClose(HANDLE findHandle) throw() override { MSFileSystemBlockedCalled(); } virtual HANDLE CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes) throw() override { return MSFileSystemBlockedHandle(); } virtual BOOL SetFileTime(HANDLE hFile, const FILETIME *lpCreationTime, const FILETIME *lpLastAccessTime, const FILETIME *lpLastWriteTime) throw() override { return MSFileSystemBlockedErrWin32(); } virtual BOOL GetFileInformationByHandle( HANDLE hFile, LPBY_HANDLE_FILE_INFORMATION lpFileInformation) throw() override { return MSFileSystemBlockedErrWin32(); } virtual DWORD GetFileType(HANDLE hFile) throw() override { MSFileSystemBlockedErrWin32(); return FILE_TYPE_UNKNOWN; } virtual BOOL CreateHardLinkW(LPCWSTR lpFileName, LPCWSTR lpExistingFileName) throw() override { return MSFileSystemBlockedErrWin32(); } virtual BOOL MoveFileExW(LPCWSTR lpExistingFileName, LPCWSTR lpNewFileName, DWORD dwFlags) throw() override { return MSFileSystemBlockedErrWin32(); } virtual DWORD GetFileAttributesW(LPCWSTR lpFileName) throw() override { MSFileSystemBlockedErrWin32(); return 0; } virtual BOOL CloseHandle(HANDLE hObject) throw() override { return MSFileSystemBlockedErrWin32(); } virtual BOOL DeleteFileW(LPCWSTR lpFileName) throw() override { return MSFileSystemBlockedErrWin32(); } virtual BOOL RemoveDirectoryW(LPCWSTR lpFileName) throw() override { return MSFileSystemBlockedErrWin32(); } virtual BOOL CreateDirectoryW(LPCWSTR lpPathName) throw() override { return MSFileSystemBlockedErrWin32(); } virtual DWORD GetCurrentDirectoryW(DWORD nBufferLength, LPWSTR lpBuffer) throw() override; virtual DWORD GetMainModuleFileNameW(LPWSTR lpFilename, DWORD nSize) throw() override; virtual DWORD GetTempPathW(DWORD nBufferLength, LPWSTR lpBuffer) throw() override; virtual BOOLEAN CreateSymbolicLinkW(LPCWSTR lpSymlinkFileName, LPCWSTR lpTargetFileName, DWORD dwFlags) throw() override { return MSFileSystemBlockedErrWin32(); } virtual bool SupportsCreateSymbolicLink() throw() override { MSFileSystemBlockedErrWin32(); return false; } virtual BOOL ReadFile(HANDLE hFile, LPVOID 
lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead) throw() override { return MSFileSystemBlockedErrWin32(); } virtual HANDLE CreateFileMappingW(HANDLE hFile, DWORD flProtect, DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow) throw() override { return MSFileSystemBlockedHandle(); } virtual LPVOID MapViewOfFile(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap) throw() override { MSFileSystemBlockedErrWin32(); return nullptr; } virtual BOOL UnmapViewOfFile(LPCVOID lpBaseAddress) throw() override { return MSFileSystemBlockedErrWin32(); } // Console APIs. virtual bool FileDescriptorIsDisplayed(int fd) throw() override { MSFileSystemBlockedCalled(); return false; } virtual unsigned GetColumnCount(DWORD nStdHandle) throw() override { MSFileSystemBlockedCalled(); return 80; } virtual unsigned GetConsoleOutputTextAttributes() throw() override { MSFileSystemBlockedCalled(); return 0; } virtual void SetConsoleOutputTextAttributes(unsigned attributes) throw() override { MSFileSystemBlockedCalled(); } virtual void ResetConsoleOutputTextAttributes() throw() override { MSFileSystemBlockedCalled(); } // CRT APIs. virtual int open_osfhandle(intptr_t osfhandle, int flags) throw() override { return MSFileSystemBlockedErrno(); } virtual intptr_t get_osfhandle(int fd) throw() override { MSFileSystemBlockedErrno(); return 0; } virtual int close(int fd) throw() override { return MSFileSystemBlockedErrno(); } virtual long lseek(int fd, long offset, int origin) throw() override { return MSFileSystemBlockedErrno(); } virtual int setmode(int fd, int mode) throw() override { return MSFileSystemBlockedErrno(); } virtual errno_t resize_file(LPCWSTR path, uint64_t size) throw() override { return MSFileSystemBlockedErrnoT(); } virtual int Read(int fd, void *buffer, unsigned int count) throw() override { return MSFileSystemBlockedErrno(); } virtual int Write(int fd, const void *buffer, unsigned int count) throw() override { return MSFileSystemBlockedErrno(); } // Unix interface #ifndef _WIN32 virtual int Open(const char *lpFileName, int flags, mode_t mode) throw() override { return MSFileSystemBlockedErrno(); } virtual int Stat(const char *lpFileName, struct stat *Status) throw() override { return MSFileSystemBlockedErrno(); } virtual int Fstat(int FD, struct stat *Status) throw() override { return MSFileSystemBlockedErrno(); } #endif }; MSFileSystemBlocked::MSFileSystemBlocked() {} DWORD MSFileSystemBlocked::GetCurrentDirectoryW(DWORD nBufferLength, LPWSTR lpBuffer) throw() { if (nBufferLength > 1) { lpBuffer[0] = L'.'; lpBuffer[1] = L'\0'; } return 1; } DWORD MSFileSystemBlocked::GetMainModuleFileNameW(LPWSTR lpFilename, DWORD nSize) throw() { SetLastError(NO_ERROR); return 0; } DWORD MSFileSystemBlocked::GetTempPathW(DWORD nBufferLength, LPWSTR lpBuffer) throw() { if (nBufferLength > 1) { lpBuffer[0] = L'.'; lpBuffer[1] = L'\0'; } return 1; } } // end namespace fs } // end namespace sys } // end namespace llvm /////////////////////////////////////////////////////////////////////////////////////////////////// // Externally visible functions. 
HRESULT CreateMSFileSystemForIface(IUnknown *pService, ::llvm::sys::fs::MSFileSystem **pResult) throw() { DXASSERT_NOMSG(pService != nullptr); DXASSERT_NOMSG(pResult != nullptr); CComPtr<IDxcSystemAccess> systemAccess; HRESULT hr = pService->QueryInterface(__uuidof(IDxcSystemAccess), (void **)&systemAccess); if (FAILED(hr)) return hr; *pResult = new (std::nothrow)::llvm::sys::fs::MSFileSystemForIface(systemAccess); return (*pResult != nullptr) ? S_OK : E_OUTOFMEMORY; } HRESULT CreateMSFileSystemBlocked(::llvm::sys::fs::MSFileSystem **pResult) throw() { DXASSERT_NOMSG(pResult != nullptr); *pResult = new (std::nothrow)::llvm::sys::fs::MSFileSystemBlocked(); return (*pResult != nullptr) ? S_OK : E_OUTOFMEMORY; } #endif // _WIN32
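// A minimal usage sketch (Windows-only, matching the #ifdef above) of the
// blocked variant: every operation fails with ERROR_FUNCTION_NOT_CALLED, and
// debug builds hit DebugBreak() first, so accidental file access inside the
// compiler is caught instead of silently touching the host. The prototype
// below is the one declared at the top of this file; how real callers
// install the instance for the current thread is elided here.
#ifdef _WIN32
#include "llvm/Support/MSFileSystem.h"

HRESULT CreateMSFileSystemBlocked(::llvm::sys::fs::MSFileSystem **pResult) throw();

static void demoBlockedFS() {
  ::llvm::sys::fs::MSFileSystem *pFS = nullptr;
  if (SUCCEEDED(CreateMSFileSystemBlocked(&pFS))) {
    // Returns 0 and sets ERROR_FUNCTION_NOT_CALLED (DebugBreak in debug):
    (void)pFS->GetFileAttributesW(L"C:\\anything");
    delete pFS;
  }
}
#endif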
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Signals.cpp
//===- Signals.cpp - Signal Handling support --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines some helpful functions for dealing with the possibility of // Unix signals occurring while your program is running. // //===----------------------------------------------------------------------===// #include "llvm/Support/Signals.h" #include "llvm/Config/config.h" namespace llvm { using namespace sys; //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only TRULY operating system //=== independent code. //===----------------------------------------------------------------------===// } // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Signals.inc" #endif #ifdef LLVM_ON_WIN32 #include "Windows/Signals.inc" #endif
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/SourceMgr.cpp
//===- SourceMgr.cpp - Manager for Simple Source Buffers & Diagnostics ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the SourceMgr class. This class is used as a simple // substrate for diagnostics, #include handling, and other low level things for // simple parsers. // //===----------------------------------------------------------------------===// #include "llvm/Support/SourceMgr.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Locale.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; static const size_t TabStop = 8; namespace { struct LineNoCacheTy { unsigned LastQueryBufferID; const char *LastQuery; unsigned LineNoOfQuery; }; } static LineNoCacheTy *getCache(void *Ptr) { return (LineNoCacheTy*)Ptr; } // HLSL Change Starts: add a Reset version of the destructor SourceMgr::~SourceMgr() { Reset(); } void SourceMgr::Reset() { // Delete the line # cache if allocated. if (LineNoCacheTy *Cache = getCache(LineNoCache)) { delete Cache; LineNoCache = nullptr; // MS Change } Buffers.clear(); IncludeDirectories.clear(); } // HLSL Change Ends: add a Reset version of the destructor unsigned SourceMgr::AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc, std::string &IncludedFile) { IncludedFile = Filename; ErrorOr<std::unique_ptr<MemoryBuffer>> NewBufOrErr = MemoryBuffer::getFile(IncludedFile); // If the file didn't exist directly, see if it's in an include path. for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBufOrErr; ++i) { IncludedFile = IncludeDirectories[i] + sys::path::get_separator().data() + Filename; NewBufOrErr = MemoryBuffer::getFile(IncludedFile); } if (!NewBufOrErr) return 0; return AddNewSourceBuffer(std::move(*NewBufOrErr), IncludeLoc); } unsigned SourceMgr::FindBufferContainingLoc(SMLoc Loc) const { for (unsigned i = 0, e = Buffers.size(); i != e; ++i) if (Loc.getPointer() >= Buffers[i].Buffer->getBufferStart() && // Use <= here so that a pointer to the null at the end of the buffer // is included as part of the buffer. Loc.getPointer() <= Buffers[i].Buffer->getBufferEnd()) return i + 1; return 0; } std::pair<unsigned, unsigned> SourceMgr::getLineAndColumn(SMLoc Loc, unsigned BufferID) const { if (!BufferID) BufferID = FindBufferContainingLoc(Loc); assert(BufferID && "Invalid Location!"); const MemoryBuffer *Buff = getMemoryBuffer(BufferID); // Count the number of \n's between the start of the file and the specified // location. unsigned LineNo = 1; const char *BufStart = Buff->getBufferStart(); const char *Ptr = BufStart; // If we have a line number cache, and if the query is to a later point in the // same file, start searching from the last query location. This optimizes // for the case when multiple diagnostics come out of one file in order. if (LineNoCacheTy *Cache = getCache(LineNoCache)) if (Cache->LastQueryBufferID == BufferID && Cache->LastQuery <= Loc.getPointer()) { Ptr = Cache->LastQuery; LineNo = Cache->LineNoOfQuery; } // Scan for the location being queried, keeping track of the number of lines // we see. for (; SMLoc::getFromPointer(Ptr) != Loc; ++Ptr) if (*Ptr == '\n') ++LineNo; // Allocate the line number cache if it doesn't exist. if (!LineNoCache) LineNoCache = new LineNoCacheTy(); // Update the line # cache. 
LineNoCacheTy &Cache = *getCache(LineNoCache); Cache.LastQueryBufferID = BufferID; Cache.LastQuery = Ptr; Cache.LineNoOfQuery = LineNo; size_t NewlineOffs = StringRef(BufStart, Ptr-BufStart).find_last_of("\n\r"); if (NewlineOffs == StringRef::npos) NewlineOffs = ~(size_t)0; return std::make_pair(LineNo, Ptr-BufStart-NewlineOffs); } void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const { if (IncludeLoc == SMLoc()) return; // Top of stack. unsigned CurBuf = FindBufferContainingLoc(IncludeLoc); assert(CurBuf && "Invalid or unspecified location!"); PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS); OS << "Included from " << getBufferInfo(CurBuf).Buffer->getBufferIdentifier() << ":" << FindLineNumber(IncludeLoc, CurBuf) << ":\n"; } SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Msg, ArrayRef<SMRange> Ranges, ArrayRef<SMFixIt> FixIts) const { // First thing to do: find the current buffer containing the specified // location to pull out the source line. SmallVector<std::pair<unsigned, unsigned>, 4> ColRanges; std::pair<unsigned, unsigned> LineAndCol; const char *BufferID = "<unknown>"; std::string LineStr; if (Loc.isValid()) { unsigned CurBuf = FindBufferContainingLoc(Loc); assert(CurBuf && "Invalid or unspecified location!"); const MemoryBuffer *CurMB = getMemoryBuffer(CurBuf); BufferID = CurMB->getBufferIdentifier(); // Scan backward to find the start of the line. const char *LineStart = Loc.getPointer(); const char *BufStart = CurMB->getBufferStart(); while (LineStart != BufStart && LineStart[-1] != '\n' && LineStart[-1] != '\r') --LineStart; // Get the end of the line. const char *LineEnd = Loc.getPointer(); const char *BufEnd = CurMB->getBufferEnd(); while (LineEnd != BufEnd && LineEnd[0] != '\n' && LineEnd[0] != '\r') ++LineEnd; LineStr = std::string(LineStart, LineEnd); // Convert any ranges to column ranges that only intersect the line of the // location. for (unsigned i = 0, e = Ranges.size(); i != e; ++i) { SMRange R = Ranges[i]; if (!R.isValid()) continue; // If the line doesn't contain any part of the range, then ignore it. if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart) continue; // Ignore pieces of the range that go onto other lines. if (R.Start.getPointer() < LineStart) R.Start = SMLoc::getFromPointer(LineStart); if (R.End.getPointer() > LineEnd) R.End = SMLoc::getFromPointer(LineEnd); // Translate from SMLoc ranges to column ranges. // FIXME: Handle multibyte characters. ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart, R.End.getPointer()-LineStart)); } LineAndCol = getLineAndColumn(Loc, CurBuf); } return SMDiagnostic(*this, Loc, BufferID, LineAndCol.first, LineAndCol.second-1, Kind, Msg.str(), LineStr, ColRanges, FixIts); } void SourceMgr::PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic, bool ShowColors) const { // Report the message with the diagnostic handler if present. 
if (DiagHandler) { DiagHandler(Diagnostic, DiagContext); return; } if (Diagnostic.getLoc().isValid()) { unsigned CurBuf = FindBufferContainingLoc(Diagnostic.getLoc()); assert(CurBuf && "Invalid or unspecified location!"); PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS); } Diagnostic.print(nullptr, OS, ShowColors); } void SourceMgr::PrintMessage(raw_ostream &OS, SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Msg, ArrayRef<SMRange> Ranges, ArrayRef<SMFixIt> FixIts, bool ShowColors) const { PrintMessage(OS, GetMessage(Loc, Kind, Msg, Ranges, FixIts), ShowColors); } void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Msg, ArrayRef<SMRange> Ranges, ArrayRef<SMFixIt> FixIts, bool ShowColors) const { PrintMessage(llvm::errs(), Loc, Kind, Msg, Ranges, FixIts, ShowColors); } //===----------------------------------------------------------------------===// // SMDiagnostic Implementation //===----------------------------------------------------------------------===// SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN, int Line, int Col, SourceMgr::DiagKind Kind, StringRef Msg, StringRef LineStr, ArrayRef<std::pair<unsigned,unsigned> > Ranges, ArrayRef<SMFixIt> Hints) : SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Kind(Kind), Message(Msg), LineContents(LineStr), Ranges(Ranges.vec()), FixIts(Hints.begin(), Hints.end()) { std::sort(FixIts.begin(), FixIts.end()); } static void buildFixItLine(std::string &CaretLine, std::string &FixItLine, ArrayRef<SMFixIt> FixIts, ArrayRef<char> SourceLine){ if (FixIts.empty()) return; const char *LineStart = SourceLine.begin(); const char *LineEnd = SourceLine.end(); size_t PrevHintEndCol = 0; for (ArrayRef<SMFixIt>::iterator I = FixIts.begin(), E = FixIts.end(); I != E; ++I) { // If the fixit contains a newline or tab, ignore it. if (I->getText().find_first_of("\n\r\t") != StringRef::npos) continue; SMRange R = I->getRange(); // If the line doesn't contain any part of the range, then ignore it. if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart) continue; // Translate from SMLoc to column. // Ignore pieces of the range that go onto other lines. // FIXME: Handle multibyte characters in the source line. unsigned FirstCol; if (R.Start.getPointer() < LineStart) FirstCol = 0; else FirstCol = R.Start.getPointer() - LineStart; // If we inserted a long previous hint, push this one forwards, and add // an extra space to show that this is not part of the previous // completion. This is sort of the best we can do when two hints appear // to overlap. // // Note that if this hint is located immediately after the previous // hint, no space will be added, since the location is more important. unsigned HintCol = FirstCol; if (HintCol < PrevHintEndCol) HintCol = PrevHintEndCol + 1; // FIXME: This assertion is intended to catch unintended use of multibyte // characters in fixits. If we decide to do this, we'll have to track // separate byte widths for the source and fixit lines. assert((size_t)llvm::sys::locale::columnWidth(I->getText()) == I->getText().size()); // This relies on one byte per column in our fixit hints. unsigned LastColumnModified = HintCol + I->getText().size(); if (LastColumnModified > FixItLine.size()) FixItLine.resize(LastColumnModified, ' '); std::copy(I->getText().begin(), I->getText().end(), FixItLine.begin() + HintCol); PrevHintEndCol = LastColumnModified; // For replacements, mark the removal range with '~'. // FIXME: Handle multibyte characters in the source line. 
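// Clamp the end of the range to the end of the source line so the '~'
// run never extends past the text being underlined.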
unsigned LastCol; if (R.End.getPointer() >= LineEnd) LastCol = LineEnd - LineStart; else LastCol = R.End.getPointer() - LineStart; std::fill(&CaretLine[FirstCol], &CaretLine[LastCol], '~'); } } static void printSourceLine(raw_ostream &S, StringRef LineContents) { // Print out the source line one character at a time, so we can expand tabs. for (unsigned i = 0, e = LineContents.size(), OutCol = 0; i != e; ++i) { if (LineContents[i] != '\t') { S << LineContents[i]; ++OutCol; continue; } // If we have a tab, emit at least one space, then round up to 8 columns. do { S << ' '; ++OutCol; } while ((OutCol % TabStop) != 0); } S << '\n'; } static bool isNonASCII(char c) { return c & 0x80; } void SMDiagnostic::print(const char *ProgName, raw_ostream &S, bool ShowColors, bool ShowKindLabel) const { // Display colors only if OS supports colors. ShowColors &= S.has_colors(); if (ShowColors) S.changeColor(raw_ostream::SAVEDCOLOR, true); if (ProgName && ProgName[0]) S << ProgName << ": "; if (!Filename.empty()) { if (Filename == "-") S << "<stdin>"; else S << Filename; if (LineNo != -1) { S << ':' << LineNo; if (ColumnNo != -1) S << ':' << (ColumnNo+1); } S << ": "; } if (ShowKindLabel) { switch (Kind) { case SourceMgr::DK_Error: if (ShowColors) S.changeColor(raw_ostream::RED, true); S << "error: "; break; case SourceMgr::DK_Warning: if (ShowColors) S.changeColor(raw_ostream::MAGENTA, true); S << "warning: "; break; case SourceMgr::DK_Note: if (ShowColors) S.changeColor(raw_ostream::BLACK, true); S << "note: "; break; } if (ShowColors) { S.resetColor(); S.changeColor(raw_ostream::SAVEDCOLOR, true); } } S << Message << '\n'; if (ShowColors) S.resetColor(); if (LineNo == -1 || ColumnNo == -1) return; // FIXME: If there are multibyte or multi-column characters in the source, all // our ranges will be wrong. To do this properly, we'll need a byte-to-column // map like Clang's TextDiagnostic. For now, we'll just handle tabs by // expanding them later, and bail out rather than show incorrect ranges and // misaligned fixits for any other odd characters. if (std::find_if(LineContents.begin(), LineContents.end(), isNonASCII) != LineContents.end()) { printSourceLine(S, LineContents); return; } size_t NumColumns = LineContents.size(); // Build the line with the caret and ranges. std::string CaretLine(NumColumns+1, ' '); // Expand any ranges. for (unsigned r = 0, e = Ranges.size(); r != e; ++r) { std::pair<unsigned, unsigned> R = Ranges[r]; std::fill(&CaretLine[R.first], &CaretLine[std::min((size_t)R.second, CaretLine.size())], '~'); } // Add any fix-its. // FIXME: Find the beginning of the line properly for multibyte characters. std::string FixItInsertionLine; buildFixItLine(CaretLine, FixItInsertionLine, FixIts, makeArrayRef(Loc.getPointer() - ColumnNo, LineContents.size())); // Finally, plop on the caret. if (unsigned(ColumnNo) <= NumColumns) CaretLine[ColumnNo] = '^'; else CaretLine[NumColumns] = '^'; // ... and remove trailing whitespace so the output doesn't wrap for it. We // know that the line isn't completely empty because it has the caret in it at // least. CaretLine.erase(CaretLine.find_last_not_of(' ')+1); printSourceLine(S, LineContents); if (ShowColors) S.changeColor(raw_ostream::GREEN, true); // Print out the caret line, matching tabs in the source line. for (unsigned i = 0, e = CaretLine.size(), OutCol = 0; i != e; ++i) { if (i >= LineContents.size() || LineContents[i] != '\t') { S << CaretLine[i]; ++OutCol; continue; } // Okay, we have a tab. Insert the appropriate number of characters. 
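// Repeating the caret-line character with the same TabStop rule used by
// printSourceLine keeps the '^' and '~' markers column-aligned with the
// expanded tabs in the source line above.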
do { S << CaretLine[i]; ++OutCol; } while ((OutCol % TabStop) != 0); } S << '\n'; if (ShowColors) S.resetColor(); // Print out the replacement line, matching tabs in the source line. if (FixItInsertionLine.empty()) return; for (size_t i = 0, e = FixItInsertionLine.size(), OutCol = 0; i < e; ++i) { if (i >= LineContents.size() || LineContents[i] != '\t') { S << FixItInsertionLine[i]; ++OutCol; continue; } // Okay, we have a tab. Insert the appropriate number of characters. do { S << FixItInsertionLine[i]; // FIXME: This is trying not to break up replacements, but then to re-sync // with the tabs between replacements. This will fail, though, if two // fix-it replacements are exactly adjacent, or if a fix-it contains a // space. Really we should be precomputing column widths, which we'll // need anyway for multibyte chars. if (FixItInsertionLine[i] != ' ') ++i; ++OutCol; } while (((OutCol % TabStop) != 0) && i != e); } S << '\n'; }
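//===----------------------------------------------------------------------===//
// Illustrative usage sketch (not part of the original file). It shows the
// typical client flow for SourceMgr: register an in-memory buffer, then point
// PrintMessage at an offset inside it. The buffer text, the offset, and the
// function name reportExampleDiagnostic are hypothetical.
//===----------------------------------------------------------------------===//
static void reportExampleDiagnostic() {
  SourceMgr SM;
  std::unique_ptr<MemoryBuffer> Buf =
      MemoryBuffer::getMemBuffer("let x = ;\n", "example.src");
  const char *Start = Buf->getBufferStart();
  SM.AddNewSourceBuffer(std::move(Buf), SMLoc());
  // Report an error at the ';' (offset 8), underlining that one character.
  SMLoc Loc = SMLoc::getFromPointer(Start + 8);
  SM.PrintMessage(Loc, SourceMgr::DK_Error, "expected expression",
                  SMRange(Loc, SMLoc::getFromPointer(Start + 9)));
}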
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Locale.cpp
#include "llvm/Support/Locale.h" #include "llvm/Support/Unicode.h" namespace llvm { namespace sys { namespace locale { int columnWidth(StringRef Text) { #if LLVM_ON_WIN32 return Text.size(); #else return llvm::sys::unicode::columnWidthUTF8(Text); #endif } bool isPrint(int UCS) { #if LLVM_ON_WIN32 // Restrict characters that we'll try to print to the lower part of ASCII // except for the control characters (0x20 - 0x7E). In general one can not // reliably output code points U+0080 and higher using narrow character C/C++ // output functions in Windows, because the meaning of the upper 128 codes is // determined by the active code page in the console. return ' ' <= UCS && UCS <= '~'; #else return llvm::sys::unicode::isPrintable(UCS); #endif } } // namespace locale } // namespace sys } // namespace llvm
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/APInt.cpp
//===-- APInt.cpp - Implement APInt class ---------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a class to represent arbitrary precision integer // constant values and provide a variety of arithmetic operations on them. // //===----------------------------------------------------------------------===// #include "llvm/ADT/APInt.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include <cmath> #include <cstdlib> #include <cstring> #include <limits> using namespace llvm; #define DEBUG_TYPE "apint" /// A utility function for allocating memory, checking for allocation failures, /// and ensuring the contents are zeroed. inline static uint64_t* getClearedMemory(unsigned numWords) { uint64_t * result = new uint64_t[numWords]; assert(result && "APInt memory allocation fails!"); memset(result, 0, numWords * sizeof(uint64_t)); return result; } /// A utility function for allocating memory and checking for allocation /// failure. The content is not zeroed. inline static uint64_t* getMemory(unsigned numWords) { uint64_t * result = new uint64_t[numWords]; assert(result && "APInt memory allocation fails!"); return result; } /// A utility function that converts a character to a digit. inline static unsigned getDigit(char cdigit, uint8_t radix) { unsigned r; if (radix == 16 || radix == 36) { r = cdigit - '0'; if (r <= 9) return r; r = cdigit - 'A'; if (r <= radix - 11U) return r + 10; r = cdigit - 'a'; if (r <= radix - 11U) return r + 10; radix = 10; } r = cdigit - '0'; if (r < radix) return r; return -1U; } void APInt::initSlowCase(unsigned numBits, uint64_t val, bool isSigned) { pVal = getClearedMemory(getNumWords()); pVal[0] = val; if (isSigned && int64_t(val) < 0) for (unsigned i = 1; i < getNumWords(); ++i) pVal[i] = -1ULL; } void APInt::initSlowCase(const APInt& that) { pVal = getMemory(getNumWords()); memcpy(pVal, that.pVal, getNumWords() * APINT_WORD_SIZE); } void APInt::initFromArray(ArrayRef<uint64_t> bigVal) { assert(BitWidth && "Bitwidth too small"); assert(bigVal.data() && "Null pointer detected!"); if (isSingleWord()) VAL = bigVal[0]; else { // Get memory, cleared to 0 pVal = getClearedMemory(getNumWords()); // Calculate the number of words to copy unsigned words = std::min<unsigned>(bigVal.size(), getNumWords()); // Copy the words from bigVal to pVal memcpy(pVal, bigVal.data(), words * APINT_WORD_SIZE); } // Make sure unused high bits are cleared clearUnusedBits(); } APInt::APInt(unsigned numBits, ArrayRef<uint64_t> bigVal) : BitWidth(numBits), VAL(0) { initFromArray(bigVal); } APInt::APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]) : BitWidth(numBits), VAL(0) { initFromArray(makeArrayRef(bigVal, numWords)); } APInt::APInt(unsigned numbits, StringRef Str, uint8_t radix) : BitWidth(numbits), VAL(0) { assert(BitWidth && "Bitwidth too small"); fromString(numbits, Str, radix); } APInt& APInt::AssignSlowCase(const APInt& RHS) { // Don't do anything for X = X if (this == &RHS) return *this; if (BitWidth == RHS.getBitWidth()) { // assume same bit-width single-word case is already handled 
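// Both operands are multi-word here (the single-word case was handled by
// the caller), so an equal bit width means a straight word-for-word copy.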
assert(!isSingleWord()); memcpy(pVal, RHS.pVal, getNumWords() * APINT_WORD_SIZE); return *this; } if (isSingleWord()) { // assume case where both are single words is already handled assert(!RHS.isSingleWord()); VAL = 0; pVal = getMemory(RHS.getNumWords()); memcpy(pVal, RHS.pVal, RHS.getNumWords() * APINT_WORD_SIZE); } else if (getNumWords() == RHS.getNumWords()) memcpy(pVal, RHS.pVal, RHS.getNumWords() * APINT_WORD_SIZE); else if (RHS.isSingleWord()) { delete [] pVal; VAL = RHS.VAL; } else { delete [] pVal; pVal = getMemory(RHS.getNumWords()); memcpy(pVal, RHS.pVal, RHS.getNumWords() * APINT_WORD_SIZE); } BitWidth = RHS.BitWidth; return clearUnusedBits(); } APInt& APInt::operator=(uint64_t RHS) { if (isSingleWord()) VAL = RHS; else { pVal[0] = RHS; memset(pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); } return clearUnusedBits(); } /// This method 'profiles' an APInt for use with FoldingSet. void APInt::Profile(FoldingSetNodeID& ID) const { ID.AddInteger(BitWidth); if (isSingleWord()) { ID.AddInteger(VAL); return; } unsigned NumWords = getNumWords(); for (unsigned i = 0; i < NumWords; ++i) ID.AddInteger(pVal[i]); } /// This function adds a single "digit" integer, y, to the multiple /// "digit" integer array, x[]. x[] is modified to reflect the addition and /// 1 is returned if there is a carry out, otherwise 0 is returned. /// @returns the carry of the addition. static bool add_1(uint64_t dest[], uint64_t x[], unsigned len, uint64_t y) { for (unsigned i = 0; i < len; ++i) { dest[i] = y + x[i]; if (dest[i] < y) y = 1; // Carry one to next digit. else { y = 0; // No need to carry so exit early break; } } return y; } /// @brief Prefix increment operator. Increments the APInt by one. APInt& APInt::operator++() { if (isSingleWord()) ++VAL; else add_1(pVal, pVal, getNumWords(), 1); return clearUnusedBits(); } /// This function subtracts a single "digit" (64-bit word), y, from /// the multi-digit integer array, x[], propagating the borrowed 1 value until /// no further borrowing is neeeded or it runs out of "digits" in x. The result /// is 1 if "borrowing" exhausted the digits in x, or 0 if x was not exhausted. /// In other words, if y > x then this function returns 1, otherwise 0. /// @returns the borrow out of the subtraction static bool sub_1(uint64_t x[], unsigned len, uint64_t y) { for (unsigned i = 0; i < len; ++i) { uint64_t X = x[i]; x[i] -= y; if (y > X) y = 1; // We have to "borrow 1" from next "digit" else { y = 0; // No need to borrow break; // Remaining digits are unchanged so exit early } } return bool(y); } /// @brief Prefix decrement operator. Decrements the APInt by one. APInt& APInt::operator--() { if (isSingleWord()) --VAL; else sub_1(pVal, getNumWords(), 1); return clearUnusedBits(); } /// This function adds the integer array x to the integer array Y and /// places the result in dest. /// @returns the carry out from the addition /// @brief General addition of 64-bit integer arrays static bool add(uint64_t *dest, const uint64_t *x, const uint64_t *y, unsigned len) { bool carry = false; for (unsigned i = 0; i< len; ++i) { uint64_t limit = std::min(x[i],y[i]); // must come first in case dest == x dest[i] = x[i] + y[i] + carry; carry = dest[i] < limit || (carry && dest[i] == limit); } return carry; } /// Adds the RHS APint to this APInt. /// @returns this, after addition of RHS. /// @brief Addition assignment operator. 
APInt& APInt::operator+=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) VAL += RHS.VAL; else { add(pVal, pVal, RHS.pVal, getNumWords()); } return clearUnusedBits(); } /// Subtracts the integer array y from the integer array x /// @returns returns the borrow out. /// @brief Generalized subtraction of 64-bit integer arrays. static bool sub(uint64_t *dest, const uint64_t *x, const uint64_t *y, unsigned len) { bool borrow = false; for (unsigned i = 0; i < len; ++i) { uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; borrow = y[i] > x_tmp || (borrow && x[i] == 0); dest[i] = x_tmp - y[i]; } return borrow; } /// Subtracts the RHS APInt from this APInt /// @returns this, after subtraction /// @brief Subtraction assignment operator. APInt& APInt::operator-=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) VAL -= RHS.VAL; else sub(pVal, pVal, RHS.pVal, getNumWords()); return clearUnusedBits(); } /// Multiplies an integer array, x, by a uint64_t integer and places the result /// into dest. /// @returns the carry out of the multiplication. /// @brief Multiply a multi-digit APInt by a single digit (64-bit) integer. static uint64_t mul_1(uint64_t dest[], uint64_t x[], unsigned len, uint64_t y) { // Split y into high 32-bit part (hy) and low 32-bit part (ly) uint64_t ly = y & 0xffffffffULL, hy = y >> 32; uint64_t carry = 0; // For each digit of x. for (unsigned i = 0; i < len; ++i) { // Split x into high and low words uint64_t lx = x[i] & 0xffffffffULL; uint64_t hx = x[i] >> 32; // hasCarry - A flag to indicate if there is a carry to the next digit. // hasCarry == 0, no carry // hasCarry == 1, has carry // hasCarry == 2, no carry and the calculation result == 0. uint8_t hasCarry = 0; dest[i] = carry + lx * ly; // Determine if the add above introduces carry. hasCarry = (dest[i] < carry) ? 1 : 0; carry = hx * ly + (dest[i] >> 32) + (hasCarry ? (1ULL << 32) : 0); // The upper limit of carry can be (2^32 - 1)(2^32 - 1) + // (2^32 - 1) + 2^32 = 2^64. hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0); carry += (lx * hy) & 0xffffffffULL; dest[i] = (carry << 32) | (dest[i] & 0xffffffffULL); carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) + (carry >> 32) + ((lx * hy) >> 32) + hx * hy; } return carry; } /// Multiplies integer array x by integer array y and stores the result into /// the integer array dest. Note that dest's size must be >= xlen + ylen. /// @brief Generalized multiplicate of integer arrays. static void mul(uint64_t dest[], uint64_t x[], unsigned xlen, uint64_t y[], unsigned ylen) { dest[xlen] = mul_1(dest, x, xlen, y[0]); for (unsigned i = 1; i < ylen; ++i) { uint64_t ly = y[i] & 0xffffffffULL, hy = y[i] >> 32; uint64_t carry = 0, lx = 0, hx = 0; for (unsigned j = 0; j < xlen; ++j) { lx = x[j] & 0xffffffffULL; hx = x[j] >> 32; // hasCarry - A flag to indicate if has carry. // hasCarry == 0, no carry // hasCarry == 1, has carry // hasCarry == 2, no carry and the calculation result == 0. uint8_t hasCarry = 0; uint64_t resul = carry + lx * ly; hasCarry = (resul < carry) ? 1 : 0; carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + (resul >> 32); hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0); carry += (lx * hy) & 0xffffffffULL; resul = (carry << 32) | (resul & 0xffffffffULL); dest[i+j] += resul; carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0)+ (carry >> 32) + (dest[i+j] < resul ? 
1 : 0) + ((lx * hy) >> 32) + hx * hy; } dest[i+xlen] = carry; } } APInt& APInt::operator*=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) { VAL *= RHS.VAL; clearUnusedBits(); return *this; } // Get some bit facts about LHS and check for zero unsigned lhsBits = getActiveBits(); unsigned lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1; if (!lhsWords) // 0 * X ===> 0 return *this; // Get some bit facts about RHS and check for zero unsigned rhsBits = RHS.getActiveBits(); unsigned rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1; if (!rhsWords) { // X * 0 ===> 0 clearAllBits(); return *this; } // Allocate space for the result unsigned destWords = rhsWords + lhsWords; uint64_t *dest = getMemory(destWords); // Perform the long multiply mul(dest, pVal, lhsWords, RHS.pVal, rhsWords); // Copy result back into *this clearAllBits(); unsigned wordsToCopy = destWords >= getNumWords() ? getNumWords() : destWords; memcpy(pVal, dest, wordsToCopy * APINT_WORD_SIZE); clearUnusedBits(); // delete dest array and return delete[] dest; return *this; } APInt& APInt::operator&=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) { VAL &= RHS.VAL; return *this; } unsigned numWords = getNumWords(); for (unsigned i = 0; i < numWords; ++i) pVal[i] &= RHS.pVal[i]; return *this; } APInt& APInt::operator|=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) { VAL |= RHS.VAL; return *this; } unsigned numWords = getNumWords(); for (unsigned i = 0; i < numWords; ++i) pVal[i] |= RHS.pVal[i]; return *this; } APInt& APInt::operator^=(const APInt& RHS) { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) { VAL ^= RHS.VAL; this->clearUnusedBits(); return *this; } unsigned numWords = getNumWords(); for (unsigned i = 0; i < numWords; ++i) pVal[i] ^= RHS.pVal[i]; return clearUnusedBits(); } APInt APInt::AndSlowCase(const APInt& RHS) const { unsigned numWords = getNumWords(); uint64_t* val = getMemory(numWords); for (unsigned i = 0; i < numWords; ++i) val[i] = pVal[i] & RHS.pVal[i]; return APInt(val, getBitWidth()); } APInt APInt::OrSlowCase(const APInt& RHS) const { unsigned numWords = getNumWords(); uint64_t *val = getMemory(numWords); for (unsigned i = 0; i < numWords; ++i) val[i] = pVal[i] | RHS.pVal[i]; return APInt(val, getBitWidth()); } APInt APInt::XorSlowCase(const APInt& RHS) const { unsigned numWords = getNumWords(); uint64_t *val = getMemory(numWords); for (unsigned i = 0; i < numWords; ++i) val[i] = pVal[i] ^ RHS.pVal[i]; APInt Result(val, getBitWidth()); // 0^0==1 so clear the high bits in case they got set. 
Result.clearUnusedBits(); return Result; } APInt APInt::operator*(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) return APInt(BitWidth, VAL * RHS.VAL); APInt Result(*this); Result *= RHS; return Result; } APInt APInt::operator+(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) return APInt(BitWidth, VAL + RHS.VAL); APInt Result(BitWidth, 0); add(Result.pVal, this->pVal, RHS.pVal, getNumWords()); Result.clearUnusedBits(); return Result; } APInt APInt::operator-(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) return APInt(BitWidth, VAL - RHS.VAL); APInt Result(BitWidth, 0); sub(Result.pVal, this->pVal, RHS.pVal, getNumWords()); Result.clearUnusedBits(); return Result; } bool APInt::EqualSlowCase(const APInt& RHS) const { // Get some facts about the number of bits used in the two operands. unsigned n1 = getActiveBits(); unsigned n2 = RHS.getActiveBits(); // If the number of bits isn't the same, they aren't equal if (n1 != n2) return false; // If the number of bits fits in a word, we only need to compare the low word. if (n1 <= APINT_BITS_PER_WORD) return pVal[0] == RHS.pVal[0]; // Otherwise, compare everything for (int i = whichWord(n1 - 1); i >= 0; --i) if (pVal[i] != RHS.pVal[i]) return false; return true; } bool APInt::EqualSlowCase(uint64_t Val) const { unsigned n = getActiveBits(); if (n <= APINT_BITS_PER_WORD) return pVal[0] == Val; else return false; } bool APInt::ult(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be same for comparison"); if (isSingleWord()) return VAL < RHS.VAL; // Get active bit length of both operands unsigned n1 = getActiveBits(); unsigned n2 = RHS.getActiveBits(); // If magnitude of LHS is less than RHS, return true. if (n1 < n2) return true; // If magnitude of RHS is greather than LHS, return false. if (n2 < n1) return false; // If they bot fit in a word, just compare the low order word if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) return pVal[0] < RHS.pVal[0]; // Otherwise, compare all words unsigned topWord = whichWord(std::max(n1,n2)-1); for (int i = topWord; i >= 0; --i) { if (pVal[i] > RHS.pVal[i]) return false; if (pVal[i] < RHS.pVal[i]) return true; } return false; } bool APInt::slt(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be same for comparison"); if (isSingleWord()) { int64_t lhsSext = (int64_t(VAL) << (64-BitWidth)) >> (64-BitWidth); int64_t rhsSext = (int64_t(RHS.VAL) << (64-BitWidth)) >> (64-BitWidth); return lhsSext < rhsSext; } APInt lhs(*this); APInt rhs(RHS); bool lhsNeg = isNegative(); bool rhsNeg = rhs.isNegative(); if (lhsNeg) { // Sign bit is set so perform two's complement to make it positive lhs.flipAllBits(); ++lhs; } if (rhsNeg) { // Sign bit is set so perform two's complement to make it positive rhs.flipAllBits(); ++rhs; } // Now we have unsigned values to compare so do the comparison if necessary // based on the negativeness of the values. if (lhsNeg) if (rhsNeg) return lhs.ugt(rhs); else return true; else if (rhsNeg) return false; else return lhs.ult(rhs); } void APInt::setBit(unsigned bitPosition) { if (isSingleWord()) VAL |= maskBit(bitPosition); else pVal[whichWord(bitPosition)] |= maskBit(bitPosition); } /// Set the given bit to 0 whose position is given as "bitPosition". /// @brief Set a given bit to 0. 
void APInt::clearBit(unsigned bitPosition) { if (isSingleWord()) VAL &= ~maskBit(bitPosition); else pVal[whichWord(bitPosition)] &= ~maskBit(bitPosition); } /// @brief Toggle every bit to its opposite value. /// Toggle a given bit to its opposite value whose position is given /// as "bitPosition". /// @brief Toggles a given bit to its opposite value. void APInt::flipBit(unsigned bitPosition) { assert(bitPosition < BitWidth && "Out of the bit-width range!"); if ((*this)[bitPosition]) clearBit(bitPosition); else setBit(bitPosition); } unsigned APInt::getBitsNeeded(StringRef str, uint8_t radix) { assert(!str.empty() && "Invalid string length"); assert((radix == 10 || radix == 8 || radix == 16 || radix == 2 || radix == 36) && "Radix should be 2, 8, 10, 16, or 36!"); size_t slen = str.size(); // Each computation below needs to know if it's negative. StringRef::iterator p = str.begin(); unsigned isNegative = *p == '-'; if (*p == '-' || *p == '+') { p++; slen--; assert(slen && "String is only a sign, needs a value."); } // For radixes of power-of-two values, the bits required is accurately and // easily computed if (radix == 2) return slen + isNegative; if (radix == 8) return slen * 3 + isNegative; if (radix == 16) return slen * 4 + isNegative; // FIXME: base 36 // This is grossly inefficient but accurate. We could probably do something // with a computation of roughly slen*64/20 and then adjust by the value of // the first few digits. But, I'm not sure how accurate that could be. // Compute a sufficient number of bits that is always large enough but might // be too large. This avoids the assertion in the constructor. This // calculation doesn't work appropriately for the numbers 0-9, so just use 4 // bits in that case. unsigned sufficient = radix == 10? (slen == 1 ? 4 : slen * 64/18) : (slen == 1 ? 7 : slen * 16/3); // Convert to the actual binary value. APInt tmp(sufficient, StringRef(p, slen), radix); // Compute how many bits are required. If the log is infinite, assume we need // just bit. unsigned log = tmp.logBase2(); if (log == (unsigned)-1) { return isNegative + 1; } else { return isNegative + log + 1; } } hash_code llvm::hash_value(const APInt &Arg) { if (Arg.isSingleWord()) return hash_combine(Arg.VAL); return hash_combine_range(Arg.pVal, Arg.pVal + Arg.getNumWords()); } bool APInt::isSplat(unsigned SplatSizeInBits) const { assert(getBitWidth() % SplatSizeInBits == 0 && "SplatSizeInBits must divide width!"); // We can check that all parts of an integer are equal by making use of a // little trick: rotate and check if it's still the same value. return *this == rotl(SplatSizeInBits); } /// This function returns the high "numBits" bits of this APInt. APInt APInt::getHiBits(unsigned numBits) const { return APIntOps::lshr(*this, BitWidth - numBits); } /// This function returns the low "numBits" bits of this APInt. APInt APInt::getLoBits(unsigned numBits) const { return APIntOps::lshr(APIntOps::shl(*this, BitWidth - numBits), BitWidth - numBits); } unsigned APInt::countLeadingZerosSlowCase() const { // Treat the most significand word differently because it might have // meaningless bits set beyond the precision. 
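// For example, a 100-bit APInt keeps 36 live bits in its top word
// (100 % 64 == 36); MSWMask below selects exactly those bits so stray
// high bits never distort the count.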
unsigned BitsInMSW = BitWidth % APINT_BITS_PER_WORD; integerPart MSWMask; if (BitsInMSW) MSWMask = (integerPart(1) << BitsInMSW) - 1; else { MSWMask = ~integerPart(0); BitsInMSW = APINT_BITS_PER_WORD; } unsigned i = getNumWords(); integerPart MSW = pVal[i-1] & MSWMask; if (MSW) return llvm::countLeadingZeros(MSW) - (APINT_BITS_PER_WORD - BitsInMSW); unsigned Count = BitsInMSW; for (--i; i > 0u; --i) { if (pVal[i-1] == 0) Count += APINT_BITS_PER_WORD; else { Count += llvm::countLeadingZeros(pVal[i-1]); break; } } return Count; } unsigned APInt::countLeadingOnes() const { if (isSingleWord()) return llvm::countLeadingOnes(VAL << (APINT_BITS_PER_WORD - BitWidth)); unsigned highWordBits = BitWidth % APINT_BITS_PER_WORD; unsigned shift; if (!highWordBits) { highWordBits = APINT_BITS_PER_WORD; shift = 0; } else { shift = APINT_BITS_PER_WORD - highWordBits; } int i = getNumWords() - 1; unsigned Count = llvm::countLeadingOnes(pVal[i] << shift); if (Count == highWordBits) { for (i--; i >= 0; --i) { if (pVal[i] == -1ULL) Count += APINT_BITS_PER_WORD; else { Count += llvm::countLeadingOnes(pVal[i]); break; } } } return Count; } unsigned APInt::countTrailingZeros() const { if (isSingleWord()) return std::min(unsigned(llvm::countTrailingZeros(VAL)), BitWidth); unsigned Count = 0; unsigned i = 0; for (; i < getNumWords() && pVal[i] == 0; ++i) Count += APINT_BITS_PER_WORD; if (i < getNumWords()) Count += llvm::countTrailingZeros(pVal[i]); return std::min(Count, BitWidth); } unsigned APInt::countTrailingOnesSlowCase() const { unsigned Count = 0; unsigned i = 0; for (; i < getNumWords() && pVal[i] == -1ULL; ++i) Count += APINT_BITS_PER_WORD; if (i < getNumWords()) Count += llvm::countTrailingOnes(pVal[i]); return std::min(Count, BitWidth); } unsigned APInt::countPopulationSlowCase() const { unsigned Count = 0; for (unsigned i = 0; i < getNumWords(); ++i) Count += llvm::countPopulation(pVal[i]); return Count; } /// Perform a logical right-shift from Src to Dst, which must be equal or /// non-overlapping, of Words words, by Shift, which must be less than 64. 
static void lshrNear(uint64_t *Dst, uint64_t *Src, unsigned Words, unsigned Shift) { uint64_t Carry = 0; for (int I = Words - 1; I >= 0; --I) { uint64_t Tmp = Src[I]; Dst[I] = (Tmp >> Shift) | Carry; Carry = Tmp << (64 - Shift); } } APInt APInt::byteSwap() const { assert(BitWidth >= 16 && BitWidth % 16 == 0 && "Cannot byteswap!"); if (BitWidth == 16) return APInt(BitWidth, ByteSwap_16(uint16_t(VAL))); if (BitWidth == 32) return APInt(BitWidth, ByteSwap_32(unsigned(VAL))); if (BitWidth == 48) { unsigned Tmp1 = unsigned(VAL >> 16); Tmp1 = ByteSwap_32(Tmp1); uint16_t Tmp2 = uint16_t(VAL); Tmp2 = ByteSwap_16(Tmp2); return APInt(BitWidth, (uint64_t(Tmp2) << 32) | Tmp1); } if (BitWidth == 64) return APInt(BitWidth, ByteSwap_64(VAL)); APInt Result(getNumWords() * APINT_BITS_PER_WORD, 0); for (unsigned I = 0, N = getNumWords(); I != N; ++I) Result.pVal[I] = ByteSwap_64(pVal[N - I - 1]); if (Result.BitWidth != BitWidth) { lshrNear(Result.pVal, Result.pVal, getNumWords(), Result.BitWidth - BitWidth); Result.BitWidth = BitWidth; } return Result; } APInt llvm::APIntOps::GreatestCommonDivisor(const APInt& API1, const APInt& API2) { APInt A = API1, B = API2; while (!!B) { APInt T = B; B = APIntOps::urem(A, B); A = T; } return A; } APInt llvm::APIntOps::RoundDoubleToAPInt(double Double, unsigned width) { union { double D; uint64_t I; } T; T.D = Double; // Get the sign bit from the highest order bit bool isNeg = T.I >> 63; // Get the 11-bit exponent and adjust for the 1023 bit bias int64_t exp = ((T.I >> 52) & 0x7ff) - 1023; // If the exponent is negative, the value is < 0 so just return 0. if (exp < 0) return APInt(width, 0u); // Extract the mantissa by clearing the top 12 bits (sign + exponent). uint64_t mantissa = (T.I & (~0ULL >> 12)) | 1ULL << 52; // If the exponent doesn't shift all bits out of the mantissa if (exp < 52) return isNeg ? -APInt(width, mantissa >> (52 - exp)) : APInt(width, mantissa >> (52 - exp)); // If the client didn't provide enough bits for us to shift the mantissa into // then the result is undefined, just return 0 if (width <= exp - 52) return APInt(width, 0); // Otherwise, we have to shift the mantissa bits up to the right location APInt Tmp(width, mantissa); Tmp = Tmp.shl((unsigned)exp - 52); return isNeg ? -Tmp : Tmp; } /// This function converts this APInt to a double. /// The layout for double is as following (IEEE Standard 754): /// -------------------------------------- /// | Sign Exponent Fraction Bias | /// |-------------------------------------- | /// | 1[63] 11[62-52] 52[51-00] 1023 | /// -------------------------------------- double APInt::roundToDouble(bool isSigned) const { // Handle the simple case where the value is contained in one uint64_t. // It is wrong to optimize getWord(0) to VAL; there might be more than one word. if (isSingleWord() || getActiveBits() <= APINT_BITS_PER_WORD) { if (isSigned) { int64_t sext = (int64_t(getWord(0)) << (64-BitWidth)) >> (64-BitWidth); return double(sext); } else return double(getWord(0)); } // Determine if the value is negative. bool isNeg = isSigned ? (*this)[BitWidth-1] : false; // Construct the absolute value if we're negative. APInt Tmp(isNeg ? -(*this) : (*this)); // Figure out how many bits we're using. unsigned n = Tmp.getActiveBits(); // The exponent (without bias normalization) is just the number of bits // we are using. Note that the sign bit is gone since we constructed the // absolute value. 
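// The packing below follows the IEEE-754 layout sketched above: one sign
// bit, an 11-bit exponent biased by 1023, and a 52-bit fraction, assembled
// in a uint64_t and reinterpreted as a double through the union.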
uint64_t exp = n; // Return infinity for exponent overflow if (exp > 1023) { if (!isSigned || !isNeg) return std::numeric_limits<double>::infinity(); else return -std::numeric_limits<double>::infinity(); } exp += 1023; // Increment for 1023 bias // Number of bits in mantissa is 52. To obtain the mantissa value, we must // extract the high 52 bits from the correct words in pVal. uint64_t mantissa; unsigned hiWord = whichWord(n-1); if (hiWord == 0) { mantissa = Tmp.pVal[0]; if (n > 52) mantissa >>= n - 52; // shift down, we want the top 52 bits. } else { assert(hiWord > 0 && "huh?"); uint64_t hibits = Tmp.pVal[hiWord] << (52 - n % APINT_BITS_PER_WORD); uint64_t lobits = Tmp.pVal[hiWord-1] >> (11 + n % APINT_BITS_PER_WORD); mantissa = hibits | lobits; } // The leading bit of mantissa is implicit, so get rid of it. uint64_t sign = isNeg ? (1ULL << (APINT_BITS_PER_WORD - 1)) : 0; union { double D; uint64_t I; } T; T.I = sign | (exp << 52) | mantissa; return T.D; } // Truncate to new width. APInt APInt::trunc(unsigned width) const { assert(width < BitWidth && "Invalid APInt Truncate request"); assert(width && "Can't truncate to 0 bits"); if (width <= APINT_BITS_PER_WORD) return APInt(width, getRawData()[0]); APInt Result(getMemory(getNumWords(width)), width); // Copy full words. unsigned i; for (i = 0; i != width / APINT_BITS_PER_WORD; i++) Result.pVal[i] = pVal[i]; // Truncate and copy any partial word. unsigned bits = (0 - width) % APINT_BITS_PER_WORD; if (bits != 0) Result.pVal[i] = pVal[i] << bits >> bits; return Result; } // Sign extend to a new width. APInt APInt::sext(unsigned width) const { assert(width > BitWidth && "Invalid APInt SignExtend request"); if (width <= APINT_BITS_PER_WORD) { uint64_t val = VAL << (APINT_BITS_PER_WORD - BitWidth); val = (int64_t)val >> (width - BitWidth); return APInt(width, val >> (APINT_BITS_PER_WORD - width)); } APInt Result(getMemory(getNumWords(width)), width); // Copy full words. unsigned i; uint64_t word = 0; for (i = 0; i != BitWidth / APINT_BITS_PER_WORD; i++) { word = getRawData()[i]; Result.pVal[i] = word; } // Read and sign-extend any partial word. unsigned bits = (0 - BitWidth) % APINT_BITS_PER_WORD; if (bits != 0) word = (int64_t)getRawData()[i] << bits >> bits; else word = (int64_t)word >> (APINT_BITS_PER_WORD - 1); // Write remaining full words. for (; i != width / APINT_BITS_PER_WORD; i++) { Result.pVal[i] = word; word = (int64_t)word >> (APINT_BITS_PER_WORD - 1); } // Write any partial word. bits = (0 - width) % APINT_BITS_PER_WORD; if (bits != 0) Result.pVal[i] = word << bits >> bits; return Result; } // Zero extend to a new width. APInt APInt::zext(unsigned width) const { assert(width > BitWidth && "Invalid APInt ZeroExtend request"); if (width <= APINT_BITS_PER_WORD) return APInt(width, VAL); APInt Result(getMemory(getNumWords(width)), width); // Copy words. unsigned i; for (i = 0; i != getNumWords(); i++) Result.pVal[i] = getRawData()[i]; // Zero remaining words. 
memset(&Result.pVal[i], 0, (Result.getNumWords() - i) * APINT_WORD_SIZE); return Result; } APInt APInt::zextOrTrunc(unsigned width) const { if (BitWidth < width) return zext(width); if (BitWidth > width) return trunc(width); return *this; } APInt APInt::sextOrTrunc(unsigned width) const { if (BitWidth < width) return sext(width); if (BitWidth > width) return trunc(width); return *this; } APInt APInt::zextOrSelf(unsigned width) const { if (BitWidth < width) return zext(width); return *this; } APInt APInt::sextOrSelf(unsigned width) const { if (BitWidth < width) return sext(width); return *this; } /// Arithmetic right-shift this APInt by shiftAmt. /// @brief Arithmetic right-shift function. APInt APInt::ashr(const APInt &shiftAmt) const { return ashr((unsigned)shiftAmt.getLimitedValue(BitWidth)); } /// Arithmetic right-shift this APInt by shiftAmt. /// @brief Arithmetic right-shift function. APInt APInt::ashr(unsigned shiftAmt) const { assert(shiftAmt <= BitWidth && "Invalid shift amount"); // Handle a degenerate case if (shiftAmt == 0) return *this; // Handle single word shifts with built-in ashr if (isSingleWord()) { if (shiftAmt == BitWidth) return APInt(BitWidth, 0); // undefined else { unsigned SignBit = APINT_BITS_PER_WORD - BitWidth; return APInt(BitWidth, (((int64_t(VAL) << SignBit) >> SignBit) >> shiftAmt)); } } // If all the bits were shifted out, the result is, technically, undefined. // We return -1 if it was negative, 0 otherwise. We check this early to avoid // issues in the algorithm below. if (shiftAmt == BitWidth) { if (isNegative()) return APInt(BitWidth, -1ULL, true); else return APInt(BitWidth, 0); } // Create some space for the result. uint64_t * val = new uint64_t[getNumWords()]; // Compute some values needed by the following shift algorithms unsigned wordShift = shiftAmt % APINT_BITS_PER_WORD; // bits to shift per word unsigned offset = shiftAmt / APINT_BITS_PER_WORD; // word offset for shift unsigned breakWord = getNumWords() - 1 - offset; // last word affected unsigned bitsInWord = whichBit(BitWidth); // how many bits in last word? if (bitsInWord == 0) bitsInWord = APINT_BITS_PER_WORD; // If we are shifting whole words, just move whole words if (wordShift == 0) { // Move the words containing significant bits for (unsigned i = 0; i <= breakWord; ++i) val[i] = pVal[i+offset]; // move whole word // Adjust the top significant word for sign bit fill, if negative if (isNegative()) if (bitsInWord < APINT_BITS_PER_WORD) val[breakWord] |= ~0ULL << bitsInWord; // set high bits } else { // Shift the low order words for (unsigned i = 0; i < breakWord; ++i) { // This combines the shifted corresponding word with the low bits from // the next word (shifted into this word's high bits). val[i] = (pVal[i+offset] >> wordShift) | (pVal[i+offset+1] << (APINT_BITS_PER_WORD - wordShift)); } // Shift the break word. In this case there are no bits from the next word // to include in this word. val[breakWord] = pVal[breakWord+offset] >> wordShift; // Deal with sign extension in the break word, and possibly the word before // it. if (isNegative()) { if (wordShift > bitsInWord) { if (breakWord > 0) val[breakWord-1] |= ~0ULL << (APINT_BITS_PER_WORD - (wordShift - bitsInWord)); val[breakWord] |= ~0ULL; } else val[breakWord] |= (~0ULL << (bitsInWord - wordShift)); } } // Remaining words are 0 or -1, just assign them. uint64_t fillValue = (isNegative() ? 
-1ULL : 0); for (unsigned i = breakWord+1; i < getNumWords(); ++i) val[i] = fillValue; APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } /// Logical right-shift this APInt by shiftAmt. /// @brief Logical right-shift function. APInt APInt::lshr(const APInt &shiftAmt) const { return lshr((unsigned)shiftAmt.getLimitedValue(BitWidth)); } /// Logical right-shift this APInt by shiftAmt. /// @brief Logical right-shift function. APInt APInt::lshr(unsigned shiftAmt) const { if (isSingleWord()) { if (shiftAmt >= BitWidth) return APInt(BitWidth, 0); else return APInt(BitWidth, this->VAL >> shiftAmt); } // If all the bits were shifted out, the result is 0. This avoids issues // with shifting by the size of the integer type, which produces undefined // results. We define these "undefined results" to always be 0. if (shiftAmt >= BitWidth) return APInt(BitWidth, 0); // If none of the bits are shifted out, the result is *this. This avoids // issues with shifting by the size of the integer type, which produces // undefined results in the code below. This is also an optimization. if (shiftAmt == 0) return *this; // Create some space for the result. uint64_t * val = new uint64_t[getNumWords()]; // If we are shifting less than a word, compute the shift with a simple carry if (shiftAmt < APINT_BITS_PER_WORD) { lshrNear(val, pVal, getNumWords(), shiftAmt); APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } // Compute some values needed by the remaining shift algorithms unsigned wordShift = shiftAmt % APINT_BITS_PER_WORD; unsigned offset = shiftAmt / APINT_BITS_PER_WORD; // If we are shifting whole words, just move whole words if (wordShift == 0) { for (unsigned i = 0; i < getNumWords() - offset; ++i) val[i] = pVal[i+offset]; for (unsigned i = getNumWords()-offset; i < getNumWords(); i++) val[i] = 0; APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } // Shift the low order words unsigned breakWord = getNumWords() - offset -1; for (unsigned i = 0; i < breakWord; ++i) val[i] = (pVal[i+offset] >> wordShift) | (pVal[i+offset+1] << (APINT_BITS_PER_WORD - wordShift)); // Shift the break word. val[breakWord] = pVal[breakWord+offset] >> wordShift; // Remaining words are 0 for (unsigned i = breakWord+1; i < getNumWords(); ++i) val[i] = 0; APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } /// Left-shift this APInt by shiftAmt. /// @brief Left-shift function. APInt APInt::shl(const APInt &shiftAmt) const { // It's undefined behavior in C to shift by BitWidth or greater. return shl((unsigned)shiftAmt.getLimitedValue(BitWidth)); } APInt APInt::shlSlowCase(unsigned shiftAmt) const { // If all the bits were shifted out, the result is 0. This avoids issues // with shifting by the size of the integer type, which produces undefined // results. We define these "undefined results" to always be 0. if (shiftAmt == BitWidth) return APInt(BitWidth, 0); // If none of the bits are shifted out, the result is *this. This avoids a // lshr by the words size in the loop below which can produce incorrect // results. It also avoids the expensive computation below for a common case. if (shiftAmt == 0) return *this; // Create some space for the result. 
uint64_t * val = new uint64_t[getNumWords()]; // If we are shifting less than a word, do it the easy way if (shiftAmt < APINT_BITS_PER_WORD) { uint64_t carry = 0; for (unsigned i = 0; i < getNumWords(); i++) { val[i] = pVal[i] << shiftAmt | carry; carry = pVal[i] >> (APINT_BITS_PER_WORD - shiftAmt); } APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } // Compute some values needed by the remaining shift algorithms unsigned wordShift = shiftAmt % APINT_BITS_PER_WORD; unsigned offset = shiftAmt / APINT_BITS_PER_WORD; // If we are shifting whole words, just move whole words if (wordShift == 0) { for (unsigned i = 0; i < offset; i++) val[i] = 0; for (unsigned i = offset; i < getNumWords(); i++) val[i] = pVal[i-offset]; APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } // Copy whole words from this to Result. unsigned i = getNumWords() - 1; for (; i > offset; --i) val[i] = pVal[i-offset] << wordShift | pVal[i-offset-1] >> (APINT_BITS_PER_WORD - wordShift); val[offset] = pVal[0] << wordShift; for (i = 0; i < offset; ++i) val[i] = 0; APInt Result(val, BitWidth); Result.clearUnusedBits(); return Result; } APInt APInt::rotl(const APInt &rotateAmt) const { return rotl((unsigned)rotateAmt.getLimitedValue(BitWidth)); } APInt APInt::rotl(unsigned rotateAmt) const { rotateAmt %= BitWidth; if (rotateAmt == 0) return *this; return shl(rotateAmt) | lshr(BitWidth - rotateAmt); } APInt APInt::rotr(const APInt &rotateAmt) const { return rotr((unsigned)rotateAmt.getLimitedValue(BitWidth)); } APInt APInt::rotr(unsigned rotateAmt) const { rotateAmt %= BitWidth; if (rotateAmt == 0) return *this; return lshr(rotateAmt) | shl(BitWidth - rotateAmt); } // Square Root - this method computes and returns the square root of "this". // Three mechanisms are used for computation. For small values (<= 5 bits), // a table lookup is done. This gets some performance for common cases. For // values using less than 52 bits, the value is converted to double and then // the libc sqrt function is called. The result is rounded and then converted // back to a uint64_t which is then used to construct the result. Finally, // the Babylonian method for computing square roots is used. APInt APInt::sqrt() const { // Determine the magnitude of the value. unsigned magnitude = getActiveBits(); // Use a fast table for some small values. This also gets rid of some // rounding errors in libc sqrt for small values. if (magnitude <= 5) { static const uint8_t results[32] = { /* 0 */ 0, /* 1- 2 */ 1, 1, /* 3- 6 */ 2, 2, 2, 2, /* 7-12 */ 3, 3, 3, 3, 3, 3, /* 13-20 */ 4, 4, 4, 4, 4, 4, 4, 4, /* 21-30 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, /* 31 */ 6 }; return APInt(BitWidth, results[ (isSingleWord() ? VAL : pVal[0]) ]); } // If the magnitude of the value fits in less than 52 bits (the precision of // an IEEE double precision floating point value), then we can use the // libc sqrt function which will probably use a hardware sqrt computation. // This should be faster than the algorithm below. if (magnitude < 52) { return APInt(BitWidth, uint64_t(::round(::sqrt(double(isSingleWord()?VAL:pVal[0]))))); } // Okay, all the short cuts are exhausted. We must compute it. The following // is a classical Babylonian method for computing the square root. This code // was adapted to APInt from a wikipedia article on such computations. // See http://www.wikipedia.org/ and go to the page named // Calculate_an_integer_square_root. 
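// Each iteration below averages the current guess with the corresponding
// quotient, x_new = (this / x_old + x_old) / 2, which roughly doubles the
// number of correct bits per step once the guess is near the true root.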
unsigned nbits = BitWidth, i = 4; APInt testy(BitWidth, 16); APInt x_old(BitWidth, 1); APInt x_new(BitWidth, 0); APInt two(BitWidth, 2); // Select a good starting value using binary logarithms. for (;; i += 2, testy = testy.shl(2)) if (i >= nbits || this->ule(testy)) { x_old = x_old.shl(i / 2); break; } // Use the Babylonian method to arrive at the integer square root: for (;;) { x_new = (this->udiv(x_old) + x_old).udiv(two); if (x_old.ule(x_new)) break; x_old = x_new; } // Make sure we return the closest approximation // NOTE: The rounding calculation below is correct. It will produce an // off-by-one discrepancy with results from pari/gp. That discrepancy has been // determined to be a rounding issue with pari/gp as it begins to use a // floating point representation after 192 bits. There are no discrepancies // between this algorithm and pari/gp for bit widths < 192 bits. APInt square(x_old * x_old); APInt nextSquare((x_old + 1) * (x_old +1)); if (this->ult(square)) return x_old; assert(this->ule(nextSquare) && "Error in APInt::sqrt computation"); APInt midpoint((nextSquare - square).udiv(two)); APInt offset(*this - square); if (offset.ult(midpoint)) return x_old; return x_old + 1; } /// Computes the multiplicative inverse of this APInt for a given modulo. The /// iterative extended Euclidean algorithm is used to solve for this value, /// however we simplify it to speed up calculating only the inverse, and take /// advantage of div+rem calculations. We also use some tricks to avoid copying /// (potentially large) APInts around. APInt APInt::multiplicativeInverse(const APInt& modulo) const { assert(ult(modulo) && "This APInt must be smaller than the modulo"); // Using the properties listed at the following web page (accessed 06/21/08): // http://www.numbertheory.org/php/euclid.html // (especially the properties numbered 3, 4 and 9) it can be proved that // BitWidth bits suffice for all the computations in the algorithm implemented // below. More precisely, this number of bits suffice if the multiplicative // inverse exists, but may not suffice for the general extended Euclidean // algorithm. APInt r[2] = { modulo, *this }; APInt t[2] = { APInt(BitWidth, 0), APInt(BitWidth, 1) }; APInt q(BitWidth, 0); unsigned i; for (i = 0; r[i^1] != 0; i ^= 1) { // An overview of the math without the confusing bit-flipping: // q = r[i-2] / r[i-1] // r[i] = r[i-2] % r[i-1] // t[i] = t[i-2] - t[i-1] * q udivrem(r[i], r[i^1], q, r[i]); t[i] -= t[i^1] * q; } // If this APInt and the modulo are not coprime, there is no multiplicative // inverse, so return 0. We check this by looking at the next-to-last // remainder, which is the gcd(*this,modulo) as calculated by the Euclidean // algorithm. if (r[i] != 1) return APInt(BitWidth, 0); // The next-to-last t is the multiplicative inverse. However, we are // interested in a positive inverse. Calcuate a positive one from a negative // one if necessary. A simple addition of the modulo suffices because // abs(t[i]) is known to be less than *this/2 (see the link above). return t[i].isNegative() ? t[i] + modulo : t[i]; } /// Calculate the magic numbers required to implement a signed integer division /// by a constant as a sequence of multiplies, adds and shifts. Requires that /// the divisor not be 0, 1, or -1. Taken from "Hacker's Delight", Henry S. /// Warren, Jr., chapter 10. 
APInt::ms APInt::magic() const { const APInt& d = *this; unsigned p; APInt ad, anc, delta, q1, r1, q2, r2, t; APInt signedMin = APInt::getSignedMinValue(d.getBitWidth()); struct ms mag; ad = d.abs(); t = signedMin + (d.lshr(d.getBitWidth() - 1)); anc = t - 1 - t.urem(ad); // absolute value of nc p = d.getBitWidth() - 1; // initialize p q1 = signedMin.udiv(anc); // initialize q1 = 2p/abs(nc) r1 = signedMin - q1*anc; // initialize r1 = rem(2p,abs(nc)) q2 = signedMin.udiv(ad); // initialize q2 = 2p/abs(d) r2 = signedMin - q2*ad; // initialize r2 = rem(2p,abs(d)) do { p = p + 1; q1 = q1<<1; // update q1 = 2p/abs(nc) r1 = r1<<1; // update r1 = rem(2p/abs(nc)) if (r1.uge(anc)) { // must be unsigned comparison q1 = q1 + 1; r1 = r1 - anc; } q2 = q2<<1; // update q2 = 2p/abs(d) r2 = r2<<1; // update r2 = rem(2p/abs(d)) if (r2.uge(ad)) { // must be unsigned comparison q2 = q2 + 1; r2 = r2 - ad; } delta = ad - r2; } while (q1.ult(delta) || (q1 == delta && r1 == 0)); mag.m = q2 + 1; if (d.isNegative()) mag.m = -mag.m; // resulting magic number mag.s = p - d.getBitWidth(); // resulting shift return mag; } /// Calculate the magic numbers required to implement an unsigned integer /// division by a constant as a sequence of multiplies, adds and shifts. /// Requires that the divisor not be 0. Taken from "Hacker's Delight", Henry /// S. Warren, Jr., chapter 10. /// LeadingZeros can be used to simplify the calculation if the upper bits /// of the divided value are known zero. APInt::mu APInt::magicu(unsigned LeadingZeros) const { const APInt& d = *this; unsigned p; APInt nc, delta, q1, r1, q2, r2; struct mu magu; magu.a = 0; // initialize "add" indicator APInt allOnes = APInt::getAllOnesValue(d.getBitWidth()).lshr(LeadingZeros); APInt signedMin = APInt::getSignedMinValue(d.getBitWidth()); APInt signedMax = APInt::getSignedMaxValue(d.getBitWidth()); nc = allOnes - (allOnes - d).urem(d); p = d.getBitWidth() - 1; // initialize p q1 = signedMin.udiv(nc); // initialize q1 = 2p/nc r1 = signedMin - q1*nc; // initialize r1 = rem(2p,nc) q2 = signedMax.udiv(d); // initialize q2 = (2p-1)/d r2 = signedMax - q2*d; // initialize r2 = rem((2p-1),d) do { p = p + 1; if (r1.uge(nc - r1)) { q1 = q1 + q1 + 1; // update q1 r1 = r1 + r1 - nc; // update r1 } else { q1 = q1+q1; // update q1 r1 = r1+r1; // update r1 } if ((r2 + 1).uge(d - r2)) { if (q2.uge(signedMax)) magu.a = 1; q2 = q2+q2 + 1; // update q2 r2 = r2+r2 + 1 - d; // update r2 } else { if (q2.uge(signedMin)) magu.a = 1; q2 = q2+q2; // update q2 r2 = r2+r2 + 1; // update r2 } delta = d - 1 - r2; } while (p < d.getBitWidth()*2 && (q1.ult(delta) || (q1 == delta && r1 == 0))); magu.m = q2 + 1; // resulting magic number magu.s = p - d.getBitWidth(); // resulting shift return magu; } /// Implementation of Knuth's Algorithm D (Division of nonnegative integers) /// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The /// variables here have the same names as in the algorithm. Comments explain /// the algorithm and any deviation from it. static void KnuthDiv(unsigned *u, unsigned *v, unsigned *q, unsigned* r, unsigned m, unsigned n) { assert(u && "Must provide dividend"); assert(v && "Must provide divisor"); assert(q && "Must provide quotient"); assert(u != v && u != q && v != q && "Must use different memory"); assert(n>1 && "n must be > 1"); // b denotes the base of the number system. In our case b is 2^32. 
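// Working in base 2^32 means every digit product and every two-digit
// partial dividend fits in a uint64_t, so steps D3 and D4 below can use
// native 64-bit arithmetic for the trial digit and the multiply-subtract.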
LLVM_CONSTEXPR uint64_t b = uint64_t(1) << 32; DEBUG(dbgs() << "KnuthDiv: m=" << m << " n=" << n << '\n'); DEBUG(dbgs() << "KnuthDiv: original:"); DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]); DEBUG(dbgs() << " by"); DEBUG(for (int i = n; i >0; i--) dbgs() << " " << v[i-1]); DEBUG(dbgs() << '\n'); // D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of // u and v by d. Note that we have taken Knuth's advice here to use a power // of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of // 2 allows us to shift instead of multiply and it is easy to determine the // shift amount from the leading zeros. We are basically normalizing the u // and v so that its high bits are shifted to the top of v's range without // overflow. Note that this can require an extra word in u so that u must // be of length m+n+1. unsigned shift = countLeadingZeros(v[n-1]); unsigned v_carry = 0; unsigned u_carry = 0; if (shift) { for (unsigned i = 0; i < m+n; ++i) { unsigned u_tmp = u[i] >> (32 - shift); u[i] = (u[i] << shift) | u_carry; u_carry = u_tmp; } for (unsigned i = 0; i < n; ++i) { unsigned v_tmp = v[i] >> (32 - shift); v[i] = (v[i] << shift) | v_carry; v_carry = v_tmp; } } u[m+n] = u_carry; DEBUG(dbgs() << "KnuthDiv: normal:"); DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]); DEBUG(dbgs() << " by"); DEBUG(for (int i = n; i >0; i--) dbgs() << " " << v[i-1]); DEBUG(dbgs() << '\n'); // D2. [Initialize j.] Set j to m. This is the loop counter over the places. int j = m; do { DEBUG(dbgs() << "KnuthDiv: quotient digit #" << j << '\n'); // D3. [Calculate q'.] // Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q') // Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r') // Now test if qp == b or qp*v[n-2] > b*rp + u[j+n-2]; if so, decrease // qp by 1, increase rp by v[n-1], and repeat this test if rp < b. The test // on v[n-2] determines at high speed most of the cases in which the trial // value qp is one too large, and it eliminates all cases where qp is two // too large. uint64_t dividend = ((uint64_t(u[j+n]) << 32) + u[j+n-1]); DEBUG(dbgs() << "KnuthDiv: dividend == " << dividend << '\n'); uint64_t qp = dividend / v[n-1]; uint64_t rp = dividend % v[n-1]; if (qp == b || qp*v[n-2] > b*rp + u[j+n-2]) { qp--; rp += v[n-1]; if (rp < b && (qp == b || qp*v[n-2] > b*rp + u[j+n-2])) qp--; } DEBUG(dbgs() << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n'); // D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with // (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). This computation // consists of a simple multiplication by a one-place number, combined with // a subtraction. // The digits (u[j+n]...u[j]) should be kept positive; if the result of // this step is actually negative, (u[j+n]...u[j]) should be left as the // true value plus b**(n+1), namely as the b's complement of // the true value, and a "borrow" to the left should be remembered. int64_t borrow = 0; for (unsigned i = 0; i < n; ++i) { uint64_t p = uint64_t(qp) * uint64_t(v[i]); int64_t subres = int64_t(u[j+i]) - borrow - (unsigned)p; u[j+i] = (unsigned)subres; borrow = (p >> 32) - (subres >> 32); DEBUG(dbgs() << "KnuthDiv: u[j+i] = " << u[j+i] << ", borrow = " << borrow << '\n'); } bool isNeg = u[j+n] < borrow; u[j+n] -= (unsigned)borrow; DEBUG(dbgs() << "KnuthDiv: after subtraction:"); DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]); DEBUG(dbgs() << '\n'); // D5. [Test remainder.] Set q[j] = qp.
If the result of step D4 was // negative, go to step D6; otherwise go on to step D7. q[j] = (unsigned)qp; if (isNeg) { // D6. [Add back]. The probability that this step is necessary is very // small, on the order of only 2/b. Make sure that test data accounts for // this possibility. Decrease q[j] by 1 q[j]--; // and add (0v[n-1]...v[1]v[0]) to (u[j+n]u[j+n-1]...u[j+1]u[j]). // A carry will occur to the left of u[j+n], and it should be ignored // since it cancels with the borrow that occurred in D4. bool carry = false; for (unsigned i = 0; i < n; i++) { unsigned limit = std::min(u[j+i],v[i]); u[j+i] += v[i] + carry; carry = u[j+i] < limit || (carry && u[j+i] == limit); } u[j+n] += carry; } DEBUG(dbgs() << "KnuthDiv: after correction:"); DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]); DEBUG(dbgs() << "\nKnuthDiv: digit result = " << q[j] << '\n'); // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3. } while (--j >= 0); DEBUG(dbgs() << "KnuthDiv: quotient:"); DEBUG(for (int i = m; i >=0; i--) dbgs() <<" " << q[i]); DEBUG(dbgs() << '\n'); // D8. [Unnormalize]. Now q[...] is the desired quotient, and the desired // remainder may be obtained by dividing u[...] by d. If r is non-null we // compute the remainder (urem uses this). if (r) { // The value d is expressed by the "shift" value above since we avoided // multiplication by d by using a shift left. So, all we have to do is // shift right here. if (shift) { unsigned carry = 0; DEBUG(dbgs() << "KnuthDiv: remainder:"); for (int i = n-1; i >= 0; i--) { r[i] = (u[i] >> shift) | carry; carry = u[i] << (32 - shift); DEBUG(dbgs() << " " << r[i]); } } else { for (int i = n-1; i >= 0; i--) { r[i] = u[i]; DEBUG(dbgs() << " " << r[i]); } } DEBUG(dbgs() << '\n'); } DEBUG(dbgs() << '\n'); } void APInt::divide(const APInt LHS, unsigned lhsWords, const APInt &RHS, unsigned rhsWords, APInt *Quotient, APInt *Remainder) { assert(lhsWords >= rhsWords && "Fractional result"); // First, compose the values into an array of 32-bit words instead of // 64-bit words. This is a necessity of both the "short division" algorithm // and the Knuth "classical algorithm" which requires there to be native // operations for +, -, and * on an m bit value with an m*2 bit result. We // can't use 64-bit operands here because we don't have native results of // 128-bits. Furthermore, casting the 64-bit values to 32-bit values won't // work on big-endian machines. uint64_t mask = ~0ull >> (sizeof(unsigned)*CHAR_BIT); unsigned n = rhsWords * 2; unsigned m = (lhsWords * 2) - n; // Allocate space for the temporary values we need either on the stack, if // it will fit, or on the heap if it won't. unsigned SPACE[128]; unsigned *U = nullptr; unsigned *V = nullptr; unsigned *Q = nullptr; unsigned *R = nullptr; if ((Remainder?4:3)*n+2*m+1 <= 128) { U = &SPACE[0]; V = &SPACE[m+n+1]; Q = &SPACE[(m+n+1) + n]; if (Remainder) R = &SPACE[(m+n+1) + n + (m+n)]; } else { U = new unsigned[m + n + 1]; V = new unsigned[n]; Q = new unsigned[m+n]; if (Remainder) R = new unsigned[n]; } // Initialize the dividend memset(U, 0, (m+n+1)*sizeof(unsigned)); for (unsigned i = 0; i < lhsWords; ++i) { uint64_t tmp = (LHS.getNumWords() == 1 ? LHS.VAL : LHS.pVal[i]); U[i * 2] = (unsigned)(tmp & mask); U[i * 2 + 1] = (unsigned)(tmp >> (sizeof(unsigned)*CHAR_BIT)); } U[m+n] = 0; // this extra word is for "spill" in the Knuth algorithm.
// Initialize the divisor memset(V, 0, (n)*sizeof(unsigned)); for (unsigned i = 0; i < rhsWords; ++i) { uint64_t tmp = (RHS.getNumWords() == 1 ? RHS.VAL : RHS.pVal[i]); V[i * 2] = (unsigned)(tmp & mask); V[i * 2 + 1] = (unsigned)(tmp >> (sizeof(unsigned)*CHAR_BIT)); } // initialize the quotient and remainder memset(Q, 0, (m+n) * sizeof(unsigned)); if (Remainder) memset(R, 0, n * sizeof(unsigned)); // Now, adjust m and n for the Knuth division. n is the number of words in // the divisor. m is the number of words by which the dividend exceeds the // divisor (i.e. m+n is the length of the dividend). These sizes must not // contain any zero words or the Knuth algorithm fails. for (unsigned i = n; i > 0 && V[i-1] == 0; i--) { n--; m++; } for (unsigned i = m+n; i > 0 && U[i-1] == 0; i--) m--; // If we're left with only a single word for the divisor, Knuth doesn't work // so we implement the short division algorithm here. This is much simpler // and faster because we are certain that we can divide a 64-bit quantity // by a 32-bit quantity at hardware speed and short division is simply a // series of such operations. This is just like doing short division but we // are using base 2^32 instead of base 10. assert(n != 0 && "Divide by zero?"); if (n == 1) { unsigned divisor = V[0]; unsigned remainder = 0; for (int i = m+n-1; i >= 0; i--) { uint64_t partial_dividend = uint64_t(remainder) << 32 | U[i]; if (partial_dividend == 0) { Q[i] = 0; remainder = 0; } else if (partial_dividend < divisor) { Q[i] = 0; remainder = (unsigned)partial_dividend; } else if (partial_dividend == divisor) { Q[i] = 1; remainder = 0; } else { Q[i] = (unsigned)(partial_dividend / divisor); remainder = (unsigned)(partial_dividend - (Q[i] * divisor)); } } if (R) R[0] = remainder; } else { // Now we're ready to invoke the Knuth classical divide algorithm. In this // case n > 1. KnuthDiv(U, V, Q, R, m, n); } // If the caller wants the quotient if (Quotient) { // Set up the Quotient value's memory. if (Quotient->BitWidth != LHS.BitWidth) { if (Quotient->isSingleWord()) Quotient->VAL = 0; else delete [] Quotient->pVal; Quotient->BitWidth = LHS.BitWidth; if (!Quotient->isSingleWord()) Quotient->pVal = getClearedMemory(Quotient->getNumWords()); } else Quotient->clearAllBits(); // The quotient is in Q. Reconstitute the quotient into Quotient's low // order words. // This case is currently dead as all users of divide() handle trivial cases // earlier. if (lhsWords == 1) { uint64_t tmp = uint64_t(Q[0]) | (uint64_t(Q[1]) << (APINT_BITS_PER_WORD / 2)); if (Quotient->isSingleWord()) Quotient->VAL = tmp; else Quotient->pVal[0] = tmp; } else { assert(!Quotient->isSingleWord() && "Quotient APInt not large enough"); for (unsigned i = 0; i < lhsWords; ++i) Quotient->pVal[i] = uint64_t(Q[i*2]) | (uint64_t(Q[i*2+1]) << (APINT_BITS_PER_WORD / 2)); } } // If the caller wants the remainder if (Remainder) { // Set up the Remainder value's memory. if (Remainder->BitWidth != RHS.BitWidth) { if (Remainder->isSingleWord()) Remainder->VAL = 0; else delete [] Remainder->pVal; Remainder->BitWidth = RHS.BitWidth; if (!Remainder->isSingleWord()) Remainder->pVal = getClearedMemory(Remainder->getNumWords()); } else Remainder->clearAllBits(); // The remainder is in R. Reconstitute the remainder into Remainder's low // order words. 
if (rhsWords == 1) { uint64_t tmp = uint64_t(R[0]) | (uint64_t(R[1]) << (APINT_BITS_PER_WORD / 2)); if (Remainder->isSingleWord()) Remainder->VAL = tmp; else Remainder->pVal[0] = tmp; } else { assert(!Remainder->isSingleWord() && "Remainder APInt not large enough"); for (unsigned i = 0; i < rhsWords; ++i) Remainder->pVal[i] = uint64_t(R[i*2]) | (uint64_t(R[i*2+1]) << (APINT_BITS_PER_WORD / 2)); } } // Clean up the memory we allocated. if (U != &SPACE[0]) { delete [] U; delete [] V; delete [] Q; delete [] R; } } APInt APInt::udiv(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); // First, deal with the easy case if (isSingleWord()) { assert(RHS.VAL != 0 && "Divide by zero?"); return APInt(BitWidth, VAL / RHS.VAL); } // Get some facts about the LHS and RHS number of bits and words unsigned rhsBits = RHS.getActiveBits(); unsigned rhsWords = !rhsBits ? 0 : (APInt::whichWord(rhsBits - 1) + 1); assert(rhsWords && "Divided by zero???"); unsigned lhsBits = this->getActiveBits(); unsigned lhsWords = !lhsBits ? 0 : (APInt::whichWord(lhsBits - 1) + 1); // Deal with some degenerate cases if (!lhsWords) // 0 / X ===> 0 return APInt(BitWidth, 0); else if (lhsWords < rhsWords || this->ult(RHS)) { // X / Y ===> 0, iff X < Y return APInt(BitWidth, 0); } else if (*this == RHS) { // X / X ===> 1 return APInt(BitWidth, 1); } else if (lhsWords == 1 && rhsWords == 1) { // All high words are zero, just use native divide return APInt(BitWidth, this->pVal[0] / RHS.pVal[0]); } // We have to compute it the hard way. Invoke the Knuth divide algorithm. APInt Quotient(1,0); // to hold result. divide(*this, lhsWords, RHS, rhsWords, &Quotient, nullptr); return Quotient; } APInt APInt::sdiv(const APInt &RHS) const { if (isNegative()) { if (RHS.isNegative()) return (-(*this)).udiv(-RHS); return -((-(*this)).udiv(RHS)); } if (RHS.isNegative()) return -(this->udiv(-RHS)); return this->udiv(RHS); } APInt APInt::urem(const APInt& RHS) const { assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); if (isSingleWord()) { assert(RHS.VAL != 0 && "Remainder by zero?"); return APInt(BitWidth, VAL % RHS.VAL); } // Get some facts about the LHS unsigned lhsBits = getActiveBits(); unsigned lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); // Get some facts about the RHS unsigned rhsBits = RHS.getActiveBits(); unsigned rhsWords = !rhsBits ? 0 : (APInt::whichWord(rhsBits - 1) + 1); assert(rhsWords && "Performing remainder operation by zero ???"); // Check the degenerate cases if (lhsWords == 0) { // 0 % Y ===> 0 return APInt(BitWidth, 0); } else if (lhsWords < rhsWords || this->ult(RHS)) { // X % Y ===> X, iff X < Y return *this; } else if (*this == RHS) { // X % X == 0; return APInt(BitWidth, 0); } else if (lhsWords == 1) { // All high words are zero, just use native remainder return APInt(BitWidth, pVal[0] % RHS.pVal[0]); } // We have to compute it the hard way. Invoke the Knuth divide algorithm. 
APInt Remainder(1,0); divide(*this, lhsWords, RHS, rhsWords, nullptr, &Remainder); return Remainder; } APInt APInt::srem(const APInt &RHS) const { if (isNegative()) { if (RHS.isNegative()) return -((-(*this)).urem(-RHS)); return -((-(*this)).urem(RHS)); } if (RHS.isNegative()) return this->urem(-RHS); return this->urem(RHS); } void APInt::udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder) { assert(LHS.BitWidth == RHS.BitWidth && "Bit widths must be the same"); // First, deal with the easy case if (LHS.isSingleWord()) { assert(RHS.VAL != 0 && "Divide by zero?"); uint64_t QuotVal = LHS.VAL / RHS.VAL; uint64_t RemVal = LHS.VAL % RHS.VAL; Quotient = APInt(LHS.BitWidth, QuotVal); Remainder = APInt(LHS.BitWidth, RemVal); return; } // Get some size facts about the dividend and divisor unsigned lhsBits = LHS.getActiveBits(); unsigned lhsWords = !lhsBits ? 0 : (APInt::whichWord(lhsBits - 1) + 1); unsigned rhsBits = RHS.getActiveBits(); unsigned rhsWords = !rhsBits ? 0 : (APInt::whichWord(rhsBits - 1) + 1); // Check the degenerate cases if (lhsWords == 0) { Quotient = 0; // 0 / Y ===> 0 Remainder = 0; // 0 % Y ===> 0 return; } if (lhsWords < rhsWords || LHS.ult(RHS)) { Remainder = LHS; // X % Y ===> X, iff X < Y Quotient = 0; // X / Y ===> 0, iff X < Y return; } if (LHS == RHS) { Quotient = 1; // X / X ===> 1 Remainder = 0; // X % X ===> 0; return; } if (lhsWords == 1 && rhsWords == 1) { // There is only one word to consider so use the native versions. uint64_t lhsValue = LHS.isSingleWord() ? LHS.VAL : LHS.pVal[0]; uint64_t rhsValue = RHS.isSingleWord() ? RHS.VAL : RHS.pVal[0]; Quotient = APInt(LHS.getBitWidth(), lhsValue / rhsValue); Remainder = APInt(LHS.getBitWidth(), lhsValue % rhsValue); return; } // Okay, lets do it the long way divide(LHS, lhsWords, RHS, rhsWords, &Quotient, &Remainder); } void APInt::sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder) { if (LHS.isNegative()) { if (RHS.isNegative()) APInt::udivrem(-LHS, -RHS, Quotient, Remainder); else { APInt::udivrem(-LHS, RHS, Quotient, Remainder); Quotient = -Quotient; } Remainder = -Remainder; } else if (RHS.isNegative()) { APInt::udivrem(LHS, -RHS, Quotient, Remainder); Quotient = -Quotient; } else { APInt::udivrem(LHS, RHS, Quotient, Remainder); } } APInt APInt::sadd_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this+RHS; Overflow = isNonNegative() == RHS.isNonNegative() && Res.isNonNegative() != isNonNegative(); return Res; } APInt APInt::uadd_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this+RHS; Overflow = Res.ult(RHS); return Res; } APInt APInt::ssub_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this - RHS; Overflow = isNonNegative() != RHS.isNonNegative() && Res.isNonNegative() != isNonNegative(); return Res; } APInt APInt::usub_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this-RHS; Overflow = Res.ugt(*this); return Res; } APInt APInt::sdiv_ov(const APInt &RHS, bool &Overflow) const { // MININT/-1 --> overflow. 
Overflow = isMinSignedValue() && RHS.isAllOnesValue(); return sdiv(RHS); } APInt APInt::smul_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this * RHS; if (*this != 0 && RHS != 0) Overflow = Res.sdiv(RHS) != *this || Res.sdiv(*this) != RHS; else Overflow = false; return Res; } APInt APInt::umul_ov(const APInt &RHS, bool &Overflow) const { APInt Res = *this * RHS; if (*this != 0 && RHS != 0) Overflow = Res.udiv(RHS) != *this || Res.udiv(*this) != RHS; else Overflow = false; return Res; } APInt APInt::sshl_ov(const APInt &ShAmt, bool &Overflow) const { Overflow = ShAmt.uge(getBitWidth()); if (Overflow) return APInt(BitWidth, 0); if (isNonNegative()) // Don't allow sign change. Overflow = ShAmt.uge(countLeadingZeros()); else Overflow = ShAmt.uge(countLeadingOnes()); return *this << ShAmt; } APInt APInt::ushl_ov(const APInt &ShAmt, bool &Overflow) const { Overflow = ShAmt.uge(getBitWidth()); if (Overflow) return APInt(BitWidth, 0); Overflow = ShAmt.ugt(countLeadingZeros()); return *this << ShAmt; } void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) { // Check our assumptions here assert(!str.empty() && "Invalid string length"); assert((radix == 10 || radix == 8 || radix == 16 || radix == 2 || radix == 36) && "Radix should be 2, 8, 10, 16, or 36!"); StringRef::iterator p = str.begin(); size_t slen = str.size(); bool isNeg = *p == '-'; if (*p == '-' || *p == '+') { p++; slen--; assert(slen && "String is only a sign, needs a value."); } assert((slen <= numbits || radix != 2) && "Insufficient bit width"); assert(((slen-1)*3 <= numbits || radix != 8) && "Insufficient bit width"); assert(((slen-1)*4 <= numbits || radix != 16) && "Insufficient bit width"); assert((((slen-1)*64)/22 <= numbits || radix != 10) && "Insufficient bit width"); // Allocate memory if (!isSingleWord()) pVal = getClearedMemory(getNumWords()); // Figure out if we can shift instead of multiply unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0); // Set up an APInt for the digit to add outside the loop so we don't // constantly construct/destruct it. APInt apdigit(getBitWidth(), 0); APInt apradix(getBitWidth(), radix); // Enter digit traversal loop for (StringRef::iterator e = str.end(); p != e; ++p) { unsigned digit = getDigit(*p, radix); assert(digit < radix && "Invalid character in digit string"); // Shift or multiply the value by the radix if (slen > 1) { if (shift) *this <<= shift; else *this *= apradix; } // Add in the digit we just interpreted if (apdigit.isSingleWord()) apdigit.VAL = digit; else apdigit.pVal[0] = digit; *this += apdigit; } // If it's negative, put it in two's complement form if (isNeg) { --(*this); this->flipAllBits(); } } void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed, bool formatAsCLiteral) const { assert((Radix == 10 || Radix == 8 || Radix == 16 || Radix == 2 || Radix == 36) && "Radix should be 2, 8, 10, 16, or 36!"); const char *Prefix = ""; if (formatAsCLiteral) { switch (Radix) { case 2: // Binary literals are a non-standard extension added in gcc 4.3: // http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Binary-constants.html Prefix = "0b"; break; case 8: Prefix = "0"; break; case 10: break; // No prefix case 16: Prefix = "0x"; break; default: llvm_unreachable("Invalid radix!"); } } // First, check for a zero value and just short circuit the logic below.
if (*this == 0) { while (*Prefix) { Str.push_back(*Prefix); ++Prefix; }; Str.push_back('0'); return; } static const char Digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; if (isSingleWord()) { char Buffer[65]; char *BufPtr = Buffer+65; uint64_t N; if (!Signed) { N = getZExtValue(); } else { int64_t I = getSExtValue(); if (I >= 0) { N = I; } else { Str.push_back('-'); N = -(uint64_t)I; } } while (*Prefix) { Str.push_back(*Prefix); ++Prefix; }; while (N) { *--BufPtr = Digits[N % Radix]; N /= Radix; } Str.append(BufPtr, Buffer+65); return; } APInt Tmp(*this); if (Signed && isNegative()) { // They want to print the signed version and it is a negative value // Flip the bits and add one to turn it into the equivalent positive // value and put a '-' in the result. Tmp.flipAllBits(); ++Tmp; Str.push_back('-'); } while (*Prefix) { Str.push_back(*Prefix); ++Prefix; }; // We insert the digits backward, then reverse them to get the right order. unsigned StartDig = Str.size(); // For the 2, 8 and 16 bit cases, we can just shift instead of divide // because the number of bits per digit (1, 3 and 4 respectively) divides // equally. We just shift until the value is zero. if (Radix == 2 || Radix == 8 || Radix == 16) { // Just shift tmp right for each digit width until it becomes zero unsigned ShiftAmt = (Radix == 16 ? 4 : (Radix == 8 ? 3 : 1)); unsigned MaskAmt = Radix - 1; while (Tmp != 0) { unsigned Digit = unsigned(Tmp.getRawData()[0]) & MaskAmt; Str.push_back(Digits[Digit]); Tmp = Tmp.lshr(ShiftAmt); } } else { APInt divisor(Radix == 10? 4 : 8, Radix); while (Tmp != 0) { APInt APdigit(1, 0); APInt tmp2(Tmp.getBitWidth(), 0); divide(Tmp, Tmp.getNumWords(), divisor, divisor.getNumWords(), &tmp2, &APdigit); unsigned Digit = (unsigned)APdigit.getZExtValue(); assert(Digit < Radix && "divide failed"); Str.push_back(Digits[Digit]); Tmp = tmp2; } } // Reverse the digits before returning. std::reverse(Str.begin()+StartDig, Str.end()); } /// Returns the APInt as a std::string. Note that this is an inefficient method. /// It is better to pass in a SmallVector/SmallString to the methods above. std::string APInt::toString(unsigned Radix = 10, bool Signed = true) const { SmallString<40> S; toString(S, Radix, Signed, /* formatAsCLiteral = */false); return S.str(); } void APInt::dump() const { SmallString<40> S, U; this->toStringUnsigned(U); this->toStringSigned(S); dbgs() << "APInt(" << BitWidth << "b, " << U << "u " << S << "s)"; } void APInt::print(raw_ostream &OS, bool isSigned) const { SmallString<40> S; this->toString(S, 10, isSigned, /* formatAsCLiteral = */false); OS << S; } // This implements a variety of operations on a representation of // arbitrary precision, two's-complement, bignum integer values. // Assumed by lowHalf, highHalf, partMSB and partLSB. A fairly safe // and unrestricting assumption. static_assert(integerPartWidth % 2 == 0, "Part width must be divisible by 2!"); /* Some handy functions local to this file. */ namespace { /* Returns the integer part with the least significant BITS set. BITS cannot be zero. */ static inline integerPart lowBitMask(unsigned int bits) { assert(bits != 0 && bits <= integerPartWidth); return ~(integerPart) 0 >> (integerPartWidth - bits); } /* Returns the value of the lower half of PART. */ static inline integerPart lowHalf(integerPart part) { return part & lowBitMask(integerPartWidth / 2); } /* Returns the value of the upper half of PART.
*/ static inline integerPart highHalf(integerPart part) { return part >> (integerPartWidth / 2); } /* Returns the bit number of the most significant set bit of a part. If the input number has no bits set -1U is returned. */ static unsigned int partMSB(integerPart value) { return findLastSet(value, ZB_Max); } /* Returns the bit number of the least significant set bit of a part. If the input number has no bits set -1U is returned. */ static unsigned int partLSB(integerPart value) { return findFirstSet(value, ZB_Max); } } /* Sets the least significant part of a bignum to the input value, and zeroes out higher parts. */ void APInt::tcSet(integerPart *dst, integerPart part, unsigned int parts) { unsigned int i; assert(parts > 0); dst[0] = part; for (i = 1; i < parts; i++) dst[i] = 0; } /* Assign one bignum to another. */ void APInt::tcAssign(integerPart *dst, const integerPart *src, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) dst[i] = src[i]; } /* Returns true if a bignum is zero, false otherwise. */ bool APInt::tcIsZero(const integerPart *src, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) if (src[i]) return false; return true; } /* Extract the given bit of a bignum; returns 0 or 1. */ int APInt::tcExtractBit(const integerPart *parts, unsigned int bit) { return (parts[bit / integerPartWidth] & ((integerPart) 1 << bit % integerPartWidth)) != 0; } /* Set the given bit of a bignum. */ void APInt::tcSetBit(integerPart *parts, unsigned int bit) { parts[bit / integerPartWidth] |= (integerPart) 1 << (bit % integerPartWidth); } /* Clears the given bit of a bignum. */ void APInt::tcClearBit(integerPart *parts, unsigned int bit) { parts[bit / integerPartWidth] &= ~((integerPart) 1 << (bit % integerPartWidth)); } /* Returns the bit number of the least significant set bit of a number. If the input number has no bits set -1U is returned. */ unsigned int APInt::tcLSB(const integerPart *parts, unsigned int n) { unsigned int i, lsb; for (i = 0; i < n; i++) { if (parts[i] != 0) { lsb = partLSB(parts[i]); return lsb + i * integerPartWidth; } } return -1U; } /* Returns the bit number of the most significant set bit of a number. If the input number has no bits set -1U is returned. */ unsigned int APInt::tcMSB(const integerPart *parts, unsigned int n) { unsigned int msb; do { --n; if (parts[n] != 0) { msb = partMSB(parts[n]); return msb + n * integerPartWidth; } } while (n); return -1U; } /* Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to DST, of dstCOUNT parts, such that the bit srcLSB becomes the least significant bit of DST. All high bits above srcBITS in DST are zero-filled. */ void APInt::tcExtract(integerPart *dst, unsigned int dstCount, const integerPart *src, unsigned int srcBits, unsigned int srcLSB) { unsigned int firstSrcPart, dstParts, shift, n; dstParts = (srcBits + integerPartWidth - 1) / integerPartWidth; assert(dstParts <= dstCount); firstSrcPart = srcLSB / integerPartWidth; tcAssign (dst, src + firstSrcPart, dstParts); shift = srcLSB % integerPartWidth; tcShiftRight (dst, dstParts, shift); /* We now have (dstParts * integerPartWidth - shift) bits from SRC in DST. If this is less than srcBits, append the rest, else clear the high bits.
*/ n = dstParts * integerPartWidth - shift; if (n < srcBits) { integerPart mask = lowBitMask (srcBits - n); dst[dstParts - 1] |= ((src[firstSrcPart + dstParts] & mask) << n % integerPartWidth); } else if (n > srcBits) { if (srcBits % integerPartWidth) dst[dstParts - 1] &= lowBitMask (srcBits % integerPartWidth); } /* Clear high parts. */ while (dstParts < dstCount) dst[dstParts++] = 0; } /* DST += RHS + C where C is zero or one. Returns the carry flag. */ integerPart APInt::tcAdd(integerPart *dst, const integerPart *rhs, integerPart c, unsigned int parts) { unsigned int i; assert(c <= 1); for (i = 0; i < parts; i++) { integerPart l; l = dst[i]; if (c) { dst[i] += rhs[i] + 1; c = (dst[i] <= l); } else { dst[i] += rhs[i]; c = (dst[i] < l); } } return c; } /* DST -= RHS + C where C is zero or one. Returns the carry flag. */ integerPart APInt::tcSubtract(integerPart *dst, const integerPart *rhs, integerPart c, unsigned int parts) { unsigned int i; assert(c <= 1); for (i = 0; i < parts; i++) { integerPart l; l = dst[i]; if (c) { dst[i] -= rhs[i] + 1; c = (dst[i] >= l); } else { dst[i] -= rhs[i]; c = (dst[i] > l); } } return c; } /* Negate a bignum in-place. */ void APInt::tcNegate(integerPart *dst, unsigned int parts) { tcComplement(dst, parts); tcIncrement(dst, parts); } /* DST += SRC * MULTIPLIER + CARRY if add is true DST = SRC * MULTIPLIER + CARRY if add is false Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must start at the same point, i.e. DST == SRC. If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned. Otherwise DST is filled with the least significant DSTPARTS parts of the result, and if all of the omitted higher parts were zero return zero, otherwise overflow occurred and return one. */ int APInt::tcMultiplyPart(integerPart *dst, const integerPart *src, integerPart multiplier, integerPart carry, unsigned int srcParts, unsigned int dstParts, bool add) { unsigned int i, n; /* Otherwise our writes of DST kill our later reads of SRC. */ assert(dst <= src || dst >= src + srcParts); assert(dstParts <= srcParts + 1); /* N loops; minimum of dstParts and srcParts. */ n = dstParts < srcParts ? dstParts: srcParts; for (i = 0; i < n; i++) { integerPart low, mid, high, srcPart; /* [ LOW, HIGH ] = MULTIPLIER * SRC[i] + DST[i] + CARRY. This cannot overflow, because (n - 1) * (n - 1) + 2 (n - 1) = (n - 1) * (n + 1) which is less than n^2. */ srcPart = src[i]; if (multiplier == 0 || srcPart == 0) { low = carry; high = 0; } else { low = lowHalf(srcPart) * lowHalf(multiplier); high = highHalf(srcPart) * highHalf(multiplier); mid = lowHalf(srcPart) * highHalf(multiplier); high += highHalf(mid); mid <<= integerPartWidth / 2; if (low + mid < low) high++; low += mid; mid = highHalf(srcPart) * lowHalf(multiplier); high += highHalf(mid); mid <<= integerPartWidth / 2; if (low + mid < low) high++; low += mid; /* Now add carry. */ if (low + carry < low) high++; low += carry; } if (add) { /* And now DST[i], and store the new low part there. */ if (low + dst[i] < low) high++; dst[i] += low; } else dst[i] = low; carry = high; } if (i < dstParts) { /* Full multiplication, there is no overflow. */ assert(i + 1 == dstParts); dst[i] = carry; return 0; } else { /* We overflowed if there is carry. */ if (carry) return 1; /* We would overflow if any significant unwritten parts would be non-zero. This is true if any remaining src parts are non-zero and the multiplier is non-zero. */ if (multiplier) for (; i < srcParts; i++) if (src[i]) return 1; /* We fitted in the narrow destination. 
*/ return 0; } } /* DST = LHS * RHS, where DST has the same width as the operands and is filled with the least significant parts of the result. Returns one if overflow occurred, otherwise zero. DST must be disjoint from both operands. */ int APInt::tcMultiply(integerPart *dst, const integerPart *lhs, const integerPart *rhs, unsigned int parts) { unsigned int i; int overflow; assert(dst != lhs && dst != rhs); overflow = 0; tcSet(dst, 0, parts); for (i = 0; i < parts; i++) overflow |= tcMultiplyPart(&dst[i], lhs, rhs[i], 0, parts, parts - i, true); return overflow; } /* DST = LHS * RHS, where DST has width the sum of the widths of the operands. No overflow occurs. DST must be disjoint from both operands. Returns the number of parts required to hold the result. */ unsigned int APInt::tcFullMultiply(integerPart *dst, const integerPart *lhs, const integerPart *rhs, unsigned int lhsParts, unsigned int rhsParts) { /* Put the narrower number on the LHS for fewer loops below. */ if (lhsParts > rhsParts) { return tcFullMultiply (dst, rhs, lhs, rhsParts, lhsParts); } else { unsigned int n; assert(dst != lhs && dst != rhs); tcSet(dst, 0, rhsParts); for (n = 0; n < lhsParts; n++) tcMultiplyPart(&dst[n], rhs, lhs[n], 0, rhsParts, rhsParts + 1, true); n = lhsParts + rhsParts; return n - (dst[n - 1] == 0); } } /* If RHS is zero LHS and REMAINDER are left unchanged, return one. Otherwise set LHS to LHS / RHS with the fractional part discarded, set REMAINDER to the remainder, return zero. i.e. OLD_LHS = RHS * LHS + REMAINDER SCRATCH is a bignum of the same size as the operands and result for use by the routine; its contents need not be initialized and are destroyed. LHS, REMAINDER and SCRATCH must be distinct. */ int APInt::tcDivide(integerPart *lhs, const integerPart *rhs, integerPart *remainder, integerPart *srhs, unsigned int parts) { unsigned int n, shiftCount; integerPart mask; assert(lhs != remainder && lhs != srhs && remainder != srhs); shiftCount = tcMSB(rhs, parts) + 1; if (shiftCount == 0) return true; shiftCount = parts * integerPartWidth - shiftCount; n = shiftCount / integerPartWidth; mask = (integerPart) 1 << (shiftCount % integerPartWidth); tcAssign(srhs, rhs, parts); tcShiftLeft(srhs, parts, shiftCount); tcAssign(remainder, lhs, parts); tcSet(lhs, 0, parts); /* Loop, subtracting SRHS if REMAINDER is greater and adding that to the total. */ for (;;) { int compare; compare = tcCompare(remainder, srhs, parts); if (compare >= 0) { tcSubtract(remainder, srhs, 0, parts); lhs[n] |= mask; } if (shiftCount == 0) break; shiftCount--; tcShiftRight(srhs, parts, 1); if ((mask >>= 1) == 0) mask = (integerPart) 1 << (integerPartWidth - 1), n--; } return false; } /* Shift a bignum left COUNT bits in-place. Shifted in bits are zero. There are no restrictions on COUNT. */ void APInt::tcShiftLeft(integerPart *dst, unsigned int parts, unsigned int count) { if (count) { unsigned int jump, shift; /* Jump is the inter-part jump; shift is the intra-part shift. */ jump = count / integerPartWidth; shift = count % integerPartWidth; while (parts > jump) { integerPart part; parts--; /* dst[i] comes from the two parts src[i - jump] and, if we have an intra-part shift, src[i - jump - 1]. */ part = dst[parts - jump]; if (shift) { part <<= shift; if (parts >= jump + 1) part |= dst[parts - jump - 1] >> (integerPartWidth - shift); } dst[parts] = part; } while (parts > 0) dst[--parts] = 0; } } /* Shift a bignum right COUNT bits in-place. Shifted in bits are zero. There are no restrictions on COUNT.
*/ void APInt::tcShiftRight(integerPart *dst, unsigned int parts, unsigned int count) { if (count) { unsigned int i, jump, shift; /* Jump is the inter-part jump; shift is the intra-part shift. */ jump = count / integerPartWidth; shift = count % integerPartWidth; /* Perform the shift. This leaves the most significant COUNT bits of the result at zero. */ for (i = 0; i < parts; i++) { integerPart part; if (i + jump >= parts) { part = 0; } else { part = dst[i + jump]; if (shift) { part >>= shift; if (i + jump + 1 < parts) part |= dst[i + jump + 1] << (integerPartWidth - shift); } } dst[i] = part; } } } /* Bitwise and of two bignums. */ void APInt::tcAnd(integerPart *dst, const integerPart *rhs, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) dst[i] &= rhs[i]; } /* Bitwise inclusive or of two bignums. */ void APInt::tcOr(integerPart *dst, const integerPart *rhs, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) dst[i] |= rhs[i]; } /* Bitwise exclusive or of two bignums. */ void APInt::tcXor(integerPart *dst, const integerPart *rhs, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) dst[i] ^= rhs[i]; } /* Complement a bignum in-place. */ void APInt::tcComplement(integerPart *dst, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) dst[i] = ~dst[i]; } /* Comparison (unsigned) of two bignums. */ int APInt::tcCompare(const integerPart *lhs, const integerPart *rhs, unsigned int parts) { while (parts) { parts--; if (lhs[parts] == rhs[parts]) continue; if (lhs[parts] > rhs[parts]) return 1; else return -1; } return 0; } /* Increment a bignum in-place, return the carry flag. */ integerPart APInt::tcIncrement(integerPart *dst, unsigned int parts) { unsigned int i; for (i = 0; i < parts; i++) if (++dst[i] != 0) break; return i == parts; } /* Decrement a bignum in-place, return the borrow flag. */ integerPart APInt::tcDecrement(integerPart *dst, unsigned int parts) { for (unsigned int i = 0; i < parts; i++) { // If the current word is non-zero, then the decrement has no effect on the // higher-order words of the integer and no borrow can occur. Exit early. if (dst[i]--) return 0; } // If every word was zero, then there is a borrow. return 1; } /* Set the least significant BITS bits of a bignum, clear the rest. */ void APInt::tcSetLeastSignificantBits(integerPart *dst, unsigned int parts, unsigned int bits) { unsigned int i; i = 0; while (bits > integerPartWidth) { dst[i++] = ~(integerPart) 0; bits -= integerPartWidth; } if (bits) dst[i++] = ~(integerPart) 0 >> (integerPartWidth - bits); while (i < parts) dst[i++] = 0; }
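// A minimal sketch of how the {m, s} pair computed by magic() above is
// consumed: a code generator replaces "n / 7" with a high multiply, an add
// and shifts. For 32-bit signed division by 7 the pair is m = 0x92492493,
// s = 2 (Hacker's Delight, Figure 10-1). This is a self-contained,
// plain-C++ illustration; the function name is hypothetical.
#include <cstdint>
static inline int32_t DivideBy7ViaMagic(int32_t n) {
  const int64_t M = (int64_t)(int32_t)0x92492493; // sign-extended magic m
  int32_t q = (int32_t)((M * (int64_t)n) >> 32);  // high half of m * n
  q += n;                 // needed because m is negative for d = 7
  q >>= 2;                // the shift s returned by magic()
  q += (uint32_t)n >> 31; // add 1 for negative n to round toward zero
  return q;               // equals n / 7 for every int32_t n
}
// e.g. DivideBy7ViaMagic(100) == 14 and DivideBy7ViaMagic(-8) == -1.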
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/SmallPtrSet.cpp
//===- llvm/ADT/SmallPtrSet.cpp - 'Normally small' pointer set ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the SmallPtrSet class. See SmallPtrSet.h for an // overview of the algorithm. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cstdlib> using namespace llvm; void SmallPtrSetImplBase::shrink_and_clear() { assert(!isSmall() && "Can't shrink a small set!"); delete[] CurArray; // HLSL Change: Use overridable operator delete // Reduce the number of buckets. unsigned Size = size(); CurArraySize = Size > 16 ? 1 << (Log2_32_Ceil(Size) + 1) : 32; NumNonEmpty = NumTombstones = 0; // Install the new array. Clear all the buckets to empty. CurArray = new const void*[CurArraySize]; // HLSL Change: Use overridable operator new assert(CurArray && "Failed to allocate memory?"); memset(CurArray, -1, CurArraySize*sizeof(void*)); } std::pair<const void *const *, bool> SmallPtrSetImplBase::insert_imp_big(const void *Ptr) { if (LLVM_UNLIKELY(size() * 4 >= CurArraySize * 3)) { // If more than 3/4 of the array is full, grow. Grow(CurArraySize < 64 ? 128 : CurArraySize * 2); } else if (LLVM_UNLIKELY(CurArraySize - NumNonEmpty < CurArraySize / 8)) { // If fewer than 1/8 of the buckets are empty (meaning that many are filled // with tombstones), rehash. Grow(CurArraySize); } // Okay, we know we have space. Find a hash bucket. const void **Bucket = const_cast<const void**>(FindBucketFor(Ptr)); if (*Bucket == Ptr) return std::make_pair(Bucket, false); // Already inserted, good. // Otherwise, insert it! if (*Bucket == getTombstoneMarker()) --NumTombstones; else ++NumNonEmpty; // Track density. *Bucket = Ptr; return std::make_pair(Bucket, true); } bool SmallPtrSetImplBase::erase_imp(const void * Ptr) { if (isSmall()) { // Check to see if it is in the set. for (const void **APtr = CurArray, **E = CurArray + NumNonEmpty; APtr != E; ++APtr) if (*APtr == Ptr) { // If it is in the set, replace this element. *APtr = getTombstoneMarker(); ++NumTombstones; return true; } return false; } // Find the hash bucket for this pointer. void **Bucket = const_cast<void**>(FindBucketFor(Ptr)); if (*Bucket != Ptr) return false; // Not in the set? // Set this as a tombstone. *Bucket = getTombstoneMarker(); ++NumTombstones; return true; } const void * const *SmallPtrSetImplBase::FindBucketFor(const void *Ptr) const { unsigned Bucket = DenseMapInfo<void *>::getHashValue(Ptr) & (CurArraySize-1); unsigned ArraySize = CurArraySize; unsigned ProbeAmt = 1; const void *const *Array = CurArray; const void *const *Tombstone = nullptr; while (1) { // If we found an empty bucket, the pointer doesn't exist in the set. // Return a tombstone if we've seen one so far, or the empty bucket if // not. if (LLVM_LIKELY(Array[Bucket] == getEmptyMarker())) return Tombstone ? Tombstone : Array+Bucket; // Found Ptr's bucket? if (LLVM_LIKELY(Array[Bucket] == Ptr)) return Array+Bucket; // If this is a tombstone, remember it. If Ptr ends up not in the set, we // prefer to return it rather than something that would require more // probing. if (Array[Bucket] == getTombstoneMarker() && !Tombstone) Tombstone = Array+Bucket; // Remember the first tombstone found.
// It's a hash collision or a tombstone. Reprobe. Bucket = (Bucket + ProbeAmt++) & (ArraySize-1); } } /// Grow - Allocate a larger backing store for the buckets and move it over. /// void SmallPtrSetImplBase::Grow(unsigned NewSize) { const void **OldBuckets = CurArray; const void **OldEnd = EndPointer(); bool WasSmall = isSmall(); // Install the new array. Clear all the buckets to empty. CurArray = new const void*[NewSize]; // HLSL Change: Use overridable operator new assert(CurArray && "Failed to allocate memory?"); CurArraySize = NewSize; memset(CurArray, -1, NewSize*sizeof(void*)); // Copy over all valid entries. for (const void **BucketPtr = OldBuckets; BucketPtr != OldEnd; ++BucketPtr) { // Copy over the element if it is valid. const void *Elt = *BucketPtr; if (Elt != getTombstoneMarker() && Elt != getEmptyMarker()) *const_cast<void**>(FindBucketFor(Elt)) = const_cast<void*>(Elt); } if (!WasSmall) delete [] OldBuckets; NumNonEmpty -= NumTombstones; NumTombstones = 0; } SmallPtrSetImplBase::SmallPtrSetImplBase(const void **SmallStorage, const SmallPtrSetImplBase& that) { SmallArray = SmallStorage; // If we're becoming small, prepare to insert into our stack space if (that.isSmall()) { CurArray = SmallArray; // Otherwise, allocate new heap space (unless we were the same size) } else { CurArray = new const void*[that.CurArraySize]; // HLSL Change: Use overridable operator new assert(CurArray && "Failed to allocate memory?"); } // Copy over the 'that' array. CopyHelper(that); } SmallPtrSetImplBase::SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize, SmallPtrSetImplBase &&that) { SmallArray = SmallStorage; MoveHelper(SmallSize, std::move(that)); } void SmallPtrSetImplBase::CopyFrom(const SmallPtrSetImplBase &RHS) { assert(&RHS != this && "Self-copy should be handled by the caller."); if (isSmall() && RHS.isSmall()) assert(CurArraySize == RHS.CurArraySize && "Cannot assign sets with different small sizes"); // If we're becoming small, prepare to insert into our stack space if (RHS.isSmall()) { if (!isSmall()) delete[] CurArray; // HLSL Change: Use overridable operator delete CurArray = SmallArray; // Otherwise, allocate new heap space (unless we were the same size) } else if (CurArraySize != RHS.CurArraySize) { if (isSmall()) CurArray = new const void*[RHS.CurArraySize]; // HLSL Change: Use overridable operator new else { // HLSL Change Begins: Use overridable operator new const void **T = new const void*[RHS.CurArraySize]; std::memcpy(T, CurArray, std::min(CurArraySize, RHS.CurArraySize) * sizeof(void *)); // scale the element count to bytes delete[] CurArray; CurArray = T; // HLSL Change Ends } assert(CurArray && "Failed to allocate memory?"); } CopyHelper(RHS); } void SmallPtrSetImplBase::CopyHelper(const SmallPtrSetImplBase &RHS) { // Copy over the new array size CurArraySize = RHS.CurArraySize; // Copy over the contents from the other set std::copy(RHS.CurArray, RHS.EndPointer(), CurArray); NumNonEmpty = RHS.NumNonEmpty; NumTombstones = RHS.NumTombstones; } void SmallPtrSetImplBase::MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS) { if (!isSmall()) delete[] CurArray; // HLSL Change: Use overridable operator delete MoveHelper(SmallSize, std::move(RHS)); } void SmallPtrSetImplBase::MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS) { assert(&RHS != this && "Self-move should be handled by the caller."); if (RHS.isSmall()) { // Copy a small RHS rather than moving.
CurArray = SmallArray; std::copy(RHS.CurArray, RHS.CurArray + RHS.NumNonEmpty, CurArray); } else { CurArray = RHS.CurArray; RHS.CurArray = RHS.SmallArray; } // Copy the rest of the trivial members. CurArraySize = RHS.CurArraySize; NumNonEmpty = RHS.NumNonEmpty; NumTombstones = RHS.NumTombstones; // Make the RHS small and empty. RHS.CurArraySize = SmallSize; assert(RHS.CurArray == RHS.SmallArray); RHS.NumNonEmpty = 0; RHS.NumTombstones = 0; } void SmallPtrSetImplBase::swap(SmallPtrSetImplBase &RHS) { if (this == &RHS) return; // We can only avoid copying elements if neither set is small. if (!this->isSmall() && !RHS.isSmall()) { std::swap(this->CurArray, RHS.CurArray); std::swap(this->CurArraySize, RHS.CurArraySize); std::swap(this->NumNonEmpty, RHS.NumNonEmpty); std::swap(this->NumTombstones, RHS.NumTombstones); return; } // FIXME: From here on we assume that both sets have the same small size. // If only RHS is small, copy the small elements into LHS and move the pointer // from LHS to RHS. if (!this->isSmall() && RHS.isSmall()) { assert(RHS.CurArray == RHS.SmallArray); std::copy(RHS.CurArray, RHS.CurArray + RHS.NumNonEmpty, this->SmallArray); std::swap(RHS.CurArraySize, this->CurArraySize); std::swap(this->NumNonEmpty, RHS.NumNonEmpty); std::swap(this->NumTombstones, RHS.NumTombstones); RHS.CurArray = this->CurArray; this->CurArray = this->SmallArray; return; } // If only LHS is small, copy the small elements into RHS and move the pointer // from RHS to LHS. if (this->isSmall() && !RHS.isSmall()) { assert(this->CurArray == this->SmallArray); std::copy(this->CurArray, this->CurArray + this->NumNonEmpty, RHS.SmallArray); std::swap(RHS.CurArraySize, this->CurArraySize); std::swap(RHS.NumNonEmpty, this->NumNonEmpty); std::swap(RHS.NumTombstones, this->NumTombstones); this->CurArray = RHS.CurArray; RHS.CurArray = RHS.SmallArray; return; } // Both are small, just swap the small elements. assert(this->isSmall() && RHS.isSmall()); unsigned MinNonEmpty = std::min(this->NumNonEmpty, RHS.NumNonEmpty); std::swap_ranges(this->SmallArray, this->SmallArray + MinNonEmpty, RHS.SmallArray); if (this->NumNonEmpty > MinNonEmpty) { std::copy(this->SmallArray + MinNonEmpty, this->SmallArray + this->NumNonEmpty, RHS.SmallArray + MinNonEmpty); } else { std::copy(RHS.SmallArray + MinNonEmpty, RHS.SmallArray + RHS.NumNonEmpty, this->SmallArray + MinNonEmpty); } assert(this->CurArraySize == RHS.CurArraySize); std::swap(this->NumNonEmpty, RHS.NumNonEmpty); std::swap(this->NumTombstones, RHS.NumTombstones); } SmallPtrSetImplBase::~SmallPtrSetImplBase() { if (!isSmall()) delete[] CurArray; // HLSL Change: Use overridable operator delete }
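// A minimal usage sketch of the container implemented above (hypothetical
// caller, not part of the library): the first N pointers live in the inline
// small array, and the probed hash table with its tombstones only comes
// into play once that array overflows.
#if 0 // sketch only
#include "llvm/ADT/SmallPtrSet.h"
void Example(int *A, int *B) {
  llvm::SmallPtrSet<int *, 4> Seen;  // 4 inline slots, no heap allocation yet
  Seen.insert(A);                    // small-mode linear insert
  bool New = Seen.insert(B).second;  // false if B was already present
  Seen.erase(A);                     // replaces the slot with a tombstone
  (void)New;
}
#endif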
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regutils.h
/*- * This code is derived from OpenBSD's libc/regex, original license follows: * * Copyright (c) 1992, 1993, 1994 Henry Spencer. * Copyright (c) 1992, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Henry Spencer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)utils.h 8.3 (Berkeley) 3/20/94 */ #ifndef LLVM_SUPPORT_REGUTILS_H #define LLVM_SUPPORT_REGUTILS_H #include <stdlib.h> /* utility definitions */ #define NC (CHAR_MAX - CHAR_MIN + 1) typedef unsigned char uch; /* switch off assertions (if not already off) if no REDEBUG */ #ifndef REDEBUG #ifndef NDEBUG #define NDEBUG /* no assertions please */ #endif #endif #include <assert.h> /* for old systems with bcopy() but no memmove() */ #ifdef USEBCOPY #define memmove(d, s, c) bcopy(s, d, c) #endif // HLSL Change Begin: Use custom allocators #ifdef __cplusplus extern "C" { #endif void* regex_malloc(size_t size); void* regex_calloc(size_t num, size_t size); // Realloc diverges from standard because we can't implement it in terms of new[]/delete[] void* regex_realloc(void* ptr, size_t oldsize, size_t newsize); void regex_free(void* ptr); #ifdef __cplusplus } #endif // HLSL Change Ends #endif
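/* A minimal sketch of one plausible implementation of the allocator hooks
 * declared above (hypothetical, not the project's actual definition).
 * regex_realloc takes the old size explicitly because, unlike realloc, an
 * implementation built on new[]/delete[] cannot recover the size of an
 * existing block. */
#if 0 /* sketch only */
#include <cstring>
#include <new>
extern "C" void *regex_malloc(size_t size) {
  return new (std::nothrow) char[size];
}
extern "C" void *regex_calloc(size_t num, size_t size) {
  void *p = regex_malloc(num * size);
  if (p) std::memset(p, 0, num * size);
  return p;
}
extern "C" void *regex_realloc(void *ptr, size_t oldsize, size_t newsize) {
  void *p = regex_malloc(newsize);
  if (!p) return nullptr; /* original block stays valid, like realloc */
  if (ptr) {
    std::memcpy(p, ptr, oldsize < newsize ? oldsize : newsize);
    regex_free(ptr);
  }
  return p;
}
extern "C" void regex_free(void *ptr) {
  delete[] static_cast<char *>(ptr);
}
#endif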
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/regstrlcpy.c
/* * This code is derived from OpenBSD's libc, original license follows: * * Copyright (c) 1998 Todd C. Miller <[email protected]> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/types.h> #include <string.h> #include "regex_impl.h" /* * Copy src to string dst of size siz. At most siz-1 characters * will be copied. Always NUL terminates (unless siz == 0). * Returns strlen(src); if retval >= siz, truncation occurred. */ size_t llvm_strlcpy(char *dst, const char *src, size_t siz) { char *d = dst; const char *s = src; size_t n = siz; // HLSL Change: not strictly needed, but makes OACR recognize that _z_ on outparam is upheld // and makes future changes more resilient if (n) *dst = '\0'; /* Copy as many bytes as will fit */ if (n != 0) { while (--n != 0) { if ((*d++ = *s++) == '\0') break; } } /* Not enough room in dst, add NUL and traverse rest of src */ if (n == 0) { if (siz != 0) *d = '\0'; /* NUL-terminate dst */ while (*s++) ; } return(s - src - 1); /* count does not include NUL */ }
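/* A minimal usage sketch (hypothetical caller, not part of the file):
 * because llvm_strlcpy returns strlen(src), truncation is detected by
 * comparing the return value against the destination size. */
#if 0 /* sketch only */
char buf[8];
size_t needed = llvm_strlcpy(buf, "hello, world", sizeof(buf));
if (needed >= sizeof(buf)) {
  /* truncated: buf holds "hello, " plus the NUL; needed == 12 */
}
#endif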
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/BlockFrequency.cpp
//====--------------- lib/Support/BlockFrequency.cpp -----------*- C++ -*-====// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements Block Frequency class. // //===----------------------------------------------------------------------===// #include "llvm/Support/BranchProbability.h" #include "llvm/Support/BlockFrequency.h" #include "llvm/Support/raw_ostream.h" #include <cassert> using namespace llvm; BlockFrequency &BlockFrequency::operator*=(const BranchProbability &Prob) { Frequency = Prob.scale(Frequency); return *this; } const BlockFrequency BlockFrequency::operator*(const BranchProbability &Prob) const { BlockFrequency Freq(Frequency); Freq *= Prob; return Freq; } BlockFrequency &BlockFrequency::operator/=(const BranchProbability &Prob) { Frequency = Prob.scaleByInverse(Frequency); return *this; } BlockFrequency BlockFrequency::operator/(const BranchProbability &Prob) const { BlockFrequency Freq(Frequency); Freq /= Prob; return Freq; } BlockFrequency &BlockFrequency::operator+=(const BlockFrequency &Freq) { uint64_t Before = Freq.Frequency; Frequency += Freq.Frequency; // If overflow, set frequency to the maximum value. if (Frequency < Before) Frequency = UINT64_MAX; return *this; } const BlockFrequency BlockFrequency::operator+(const BlockFrequency &Prob) const { BlockFrequency Freq(Frequency); Freq += Prob; return Freq; } BlockFrequency &BlockFrequency::operator>>=(const unsigned count) { // Frequency can never be 0 by design. assert(Frequency != 0); // Shift right by count. Frequency >>= count; // Saturate to 1 if we are 0. Frequency |= Frequency == 0; return *this; }
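// A minimal sketch of the arithmetic defined above (hypothetical values,
// and assuming BranchProbability's public numerator/denominator
// constructor): frequencies scale multiplicatively by edge probabilities
// and saturate rather than wrap on addition.
#if 0 // sketch only
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include <cstdint>
void Example() {
  llvm::BlockFrequency Entry(1000);
  llvm::BranchProbability Taken(3, 4);            // 75% taken
  llvm::BlockFrequency TakenFreq = Entry * Taken; // scales to ~750
  llvm::BlockFrequency Huge(UINT64_MAX);
  Huge += Entry;                                  // clamps at UINT64_MAX
}
#endif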
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Regex.cpp
//===-- Regex.cpp - Regular Expression matcher implementation -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a POSIX regular expression matcher. // //===----------------------------------------------------------------------===// #include "llvm/Support/Regex.h" #include "regex_impl.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include <string> using namespace llvm; Regex::Regex(StringRef regex, unsigned Flags) { unsigned flags = 0; preg = new llvm_regex(); preg->re_endp = regex.end(); if (Flags & IgnoreCase) flags |= REG_ICASE; if (Flags & Newline) flags |= REG_NEWLINE; if (!(Flags & BasicRegex)) flags |= REG_EXTENDED; error = llvm_regcomp(preg, regex.data(), flags|REG_PEND); } Regex::~Regex() { if (preg) { llvm_regfree(preg); delete preg; } } bool Regex::isValid(std::string &Error) { if (!error) return true; size_t len = llvm_regerror(error, preg, nullptr, 0); Error.resize(len - 1); llvm_regerror(error, preg, &Error[0], len); return false; } /// getNumMatches - In a valid regex, return the number of parenthesized /// matches it contains. unsigned Regex::getNumMatches() const { return preg->re_nsub; } bool Regex::match(StringRef String, SmallVectorImpl<StringRef> *Matches) { unsigned nmatch = Matches ? preg->re_nsub+1 : 0; // pmatch needs to have at least one element. SmallVector<llvm_regmatch_t, 8> pm; pm.resize(nmatch > 0 ? nmatch : 1); pm[0].rm_so = 0; pm[0].rm_eo = String.size(); int rc = llvm_regexec(preg, String.data(), nmatch, pm.data(), REG_STARTEND); if (rc == REG_NOMATCH) return false; if (rc != 0) { // regexec can fail due to invalid pattern or running out of memory. error = rc; return false; } // There was a match. if (Matches) { // match position requested Matches->clear(); for (unsigned i = 0; i != nmatch; ++i) { if (pm[i].rm_so == -1) { // this group didn't match Matches->push_back(StringRef()); continue; } assert(pm[i].rm_eo >= pm[i].rm_so); Matches->push_back(StringRef(String.data()+pm[i].rm_so, pm[i].rm_eo-pm[i].rm_so)); } } return true; } std::string Regex::sub(StringRef Repl, StringRef String, std::string *Error) { SmallVector<StringRef, 8> Matches; // Reset error, if given. if (Error && !Error->empty()) *Error = ""; // Return the input if there was no match. if (!match(String, &Matches)) return String; // Otherwise splice in the replacement string, starting with the prefix before // the match. std::string Res(String.begin(), Matches[0].begin()); // Then the replacement string, honoring possible substitutions. while (!Repl.empty()) { // Skip to the next escape. std::pair<StringRef, StringRef> Split = Repl.split('\\'); // Add the skipped substring. Res += Split.first; // Check for termination and trailing backslash. if (Split.second.empty()) { if (Repl.size() != Split.first.size() && Error && Error->empty()) *Error = "replacement string contained trailing backslash"; break; } // Otherwise update the replacement string and interpret escapes. Repl = Split.second; // FIXME: We should have a StringExtras function for mapping C99 escapes. switch (Repl[0]) { // Treat all unrecognized characters as self-quoting. default: Res += Repl[0]; Repl = Repl.substr(1); break; // Single character escapes.
case 't': Res += '\t'; Repl = Repl.substr(1); break; case 'n': Res += '\n'; Repl = Repl.substr(1); break; // Decimal escapes are backreferences. case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { // Extract the backreference number. StringRef Ref = Repl.slice(0, Repl.find_first_not_of("0123456789")); Repl = Repl.substr(Ref.size()); unsigned RefValue; if (!Ref.getAsInteger(10, RefValue) && RefValue < Matches.size()) Res += Matches[RefValue]; else if (Error && Error->empty()) *Error = ("invalid backreference string '" + Twine(Ref) + "'").str(); break; } } } // And finally the suffix. Res += StringRef(Matches[0].end(), String.end() - Matches[0].end()); return Res; } // These are the special characters matched in functions like "p_ere_exp". static const char RegexMetachars[] = "()^$|*+?.[]\\{}"; bool Regex::isLiteralERE(StringRef Str) { // Check for regex metacharacters. This list was derived from our regex // implementation in regcomp.c and double checked against the POSIX extended // regular expression specification. return Str.find_first_of(RegexMetachars) == StringRef::npos; } std::string Regex::escape(StringRef String) { std::string RegexStr; for (unsigned i = 0, e = String.size(); i != e; ++i) { if (strchr(RegexMetachars, String[i])) RegexStr += '\\'; RegexStr += String[i]; } return RegexStr; }
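// A minimal usage sketch of the interface implemented above (hypothetical
// pattern and input): Matches[0] is the whole match, Matches[1..N] are the
// parenthesized groups, and "\1".."\9" in sub()'s replacement string are
// the backreferences handled by the loop above.
#if 0 // sketch only
llvm::Regex R("([a-z]+)-([0-9]+)");
llvm::SmallVector<llvm::StringRef, 4> Matches;
if (R.match("abc-42", &Matches)) {
  // Matches[0] == "abc-42", Matches[1] == "abc", Matches[2] == "42"
}
std::string Swapped = R.sub("\\2-\\1", "abc-42"); // yields "42-abc"
#endif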
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/TimeValue.cpp
//===-- TimeValue.cpp - Implement OS TimeValue Concept ----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the operating system TimeValue concept. // //===----------------------------------------------------------------------===// #include "llvm/Support/TimeValue.h" #include "llvm/Config/config.h" namespace llvm { using namespace sys; const TimeValue::SecondsType TimeValue::PosixZeroTimeSeconds = -946684800; const TimeValue::SecondsType TimeValue::Win32ZeroTimeSeconds = -12591158400ULL; void TimeValue::normalize( void ) { if ( nanos_ >= NANOSECONDS_PER_SECOND ) { do { seconds_++; nanos_ -= NANOSECONDS_PER_SECOND; } while ( nanos_ >= NANOSECONDS_PER_SECOND ); } else if (nanos_ <= -NANOSECONDS_PER_SECOND ) { do { seconds_--; nanos_ += NANOSECONDS_PER_SECOND; } while (nanos_ <= -NANOSECONDS_PER_SECOND); } if (seconds_ >= 1 && nanos_ < 0) { seconds_--; nanos_ += NANOSECONDS_PER_SECOND; } else if (seconds_ < 0 && nanos_ > 0) { seconds_++; nanos_ -= NANOSECONDS_PER_SECOND; } } } /// Include the platform-specific portion of TimeValue class #ifdef LLVM_ON_UNIX #include "Unix/TimeValue.inc" #endif #ifdef LLVM_ON_WIN32 #include "Windows/TimeValue.inc" #endif
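// A minimal sketch of what normalize() above guarantees (hypothetical
// values, assuming the public (seconds, nanos) constructor runs it):
// nanos_ always ends up strictly between -NANOSECONDS_PER_SECOND and
// NANOSECONDS_PER_SECOND, with a sign that agrees with seconds_.
#if 0 // sketch only
llvm::sys::TimeValue A(2, 1500000000); // normalizes to 3 s, 500000000 ns
llvm::sys::TimeValue B(1, -250000000); // normalizes to 0 s, 750000000 ns
#endif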
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/Program.cpp
//===-- Program.cpp - Implement OS Program Concept --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the operating system Program concept. // //===----------------------------------------------------------------------===// #include "llvm/Support/Program.h" #include "llvm/ADT/StringRef.h" #include "llvm/Config/config.h" #include <system_error> using namespace llvm; using namespace sys; //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only TRULY operating system //=== independent code. //===----------------------------------------------------------------------===// #if defined(MSFT_SUPPORTS_CHILD_PROCESSES) || defined(LLVM_ON_UNIX) static bool Execute(ProcessInfo &PI, StringRef Program, const char **args, const char **env, const StringRef **Redirects, unsigned memoryLimit, std::string *ErrMsg); int sys::ExecuteAndWait(StringRef Program, const char **args, const char **envp, const StringRef **redirects, unsigned secondsToWait, unsigned memoryLimit, std::string *ErrMsg, bool *ExecutionFailed) { ProcessInfo PI; if (Execute(PI, Program, args, envp, redirects, memoryLimit, ErrMsg)) { if (ExecutionFailed) *ExecutionFailed = false; ProcessInfo Result = Wait( PI, secondsToWait, /*WaitUntilTerminates=*/secondsToWait == 0, ErrMsg); return Result.ReturnCode; } if (ExecutionFailed) *ExecutionFailed = true; return -1; } ProcessInfo sys::ExecuteNoWait(StringRef Program, const char **args, const char **envp, const StringRef **redirects, unsigned memoryLimit, std::string *ErrMsg, bool *ExecutionFailed) { ProcessInfo PI; if (ExecutionFailed) *ExecutionFailed = false; if (!Execute(PI, Program, args, envp, redirects, memoryLimit, ErrMsg)) if (ExecutionFailed) *ExecutionFailed = true; return PI; } #endif // MSFT_SUPPORTS_CHILD_PROCESSES // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Program.inc" #endif #ifdef LLVM_ON_WIN32 #include "Windows/Program.inc" #endif
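// A minimal usage sketch of ExecuteAndWait above (hypothetical program and
// arguments): args is a null-terminated argv array whose first entry
// conventionally repeats the program path; envp and redirects may be null
// to inherit the parent's environment and file descriptors.
#if 0 // sketch only
const char *Args[] = {"/bin/ls", "-l", nullptr};
std::string Err;
bool Failed = false;
int RC = llvm::sys::ExecuteAndWait("/bin/ls", Args, /*envp=*/nullptr,
                                   /*redirects=*/nullptr,
                                   /*secondsToWait=*/0, /*memoryLimit=*/0,
                                   &Err, &Failed);
// RC holds the child's return code; secondsToWait == 0 waits until the
// process terminates, per the Wait() call above.
#endif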
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Support/StreamingMemoryObject.cpp
//===- StreamingMemoryObject.cpp - Streamable data interface -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/StreamingMemoryObject.h"
#include <cassert>
#include <cstddef>
#include <cstring>
using namespace llvm;

namespace {

class RawMemoryObject : public MemoryObject {
public:
  RawMemoryObject(const unsigned char *Start, const unsigned char *End)
      : FirstChar(Start), LastChar(End) {
    assert(LastChar >= FirstChar && "Invalid start/end range");
  }

  uint64_t getExtent() const override { return LastChar - FirstChar; }
  uint64_t readBytes(uint8_t *Buf, uint64_t Size,
                     uint64_t Address) const override;
  const uint8_t *getPointer(uint64_t address, uint64_t size) const override;
  bool isValidAddress(uint64_t address) const override {
    return validAddress(address);
  }

private:
  const uint8_t *const FirstChar;
  const uint8_t *const LastChar;

  // These are implemented as inline functions here to avoid multiple virtual
  // calls per public function.
  bool validAddress(uint64_t address) const {
    return static_cast<std::ptrdiff_t>(address) < LastChar - FirstChar;
  }

  RawMemoryObject(const RawMemoryObject &) = delete;
  void operator=(const RawMemoryObject &) = delete;
};

uint64_t RawMemoryObject::readBytes(uint8_t *Buf, uint64_t Size,
                                    uint64_t Address) const {
  uint64_t BufferSize = LastChar - FirstChar;
  if (Address >= BufferSize)
    return 0;

  // Clamp the read to the end of the buffer.
  uint64_t End = Address + Size;
  if (End > BufferSize)
    End = BufferSize;

  assert(static_cast<int64_t>(End - Address) >= 0);
  Size = End - Address;
  memcpy(Buf, Address + FirstChar, Size);
  return Size;
}

const uint8_t *RawMemoryObject::getPointer(uint64_t address,
                                           uint64_t size) const {
  return FirstChar + address;
}
} // anonymous namespace

namespace llvm {

// If the bitcode has a header, then its size is known, and we don't have to
// block until we actually want to read it.
bool StreamingMemoryObject::isValidAddress(uint64_t address) const {
  if (ObjectSize && address < ObjectSize)
    return true;
  return fetchToPos(address);
}

uint64_t StreamingMemoryObject::getExtent() const {
  if (ObjectSize)
    return ObjectSize;
  size_t pos = BytesRead + kChunkSize;
  // Keep fetching until we run out of bytes.
  while (fetchToPos(pos))
    pos += kChunkSize;
  return ObjectSize;
}

uint64_t StreamingMemoryObject::readBytes(uint8_t *Buf, uint64_t Size,
                                          uint64_t Address) const {
  fetchToPos(Address + Size - 1);
  // Note: for wrapped bitcode files, ObjectSize is set after the first call
  // to fetchToPos. In such cases, ObjectSize can be smaller than BytesRead.
  size_t MaxAddress =
      (ObjectSize && ObjectSize < BytesRead) ? ObjectSize : BytesRead;
  if (Address >= MaxAddress)
    return 0;

  uint64_t End = Address + Size;
  if (End > MaxAddress)
    End = MaxAddress;
  assert(End >= Address);
  Size = End - Address;
  memcpy(Buf, &Bytes[Address + BytesSkipped], Size);
  return Size;
}

bool StreamingMemoryObject::dropLeadingBytes(size_t s) {
  if (BytesRead < s)
    return true;
  BytesSkipped = s;
  BytesRead -= s;
  return false;
}

void StreamingMemoryObject::setKnownObjectSize(size_t size) {
  ObjectSize = size;
  Bytes.reserve(size);
  if (ObjectSize <= BytesRead)
    EOFReached = true;
}

MemoryObject *getNonStreamedMemoryObject(const unsigned char *Start,
                                         const unsigned char *End) {
  return new RawMemoryObject(Start, End);
}

StreamingMemoryObject::StreamingMemoryObject(
    std::unique_ptr<DataStreamer> Streamer)
    : Bytes(kChunkSize), Streamer(std::move(Streamer)), BytesRead(0),
      BytesSkipped(0), ObjectSize(0), EOFReached(false) {
  BytesRead = this->Streamer->GetBytes(&Bytes[0], kChunkSize);
}
} // namespace llvm
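// --- Usage sketch (not part of StreamingMemoryObject.cpp above) -----------
// A minimal caller of getNonStreamedMemoryObject/readBytes as defined above,
// assuming the declarations live in llvm/Support/StreamingMemoryObject.h as
// in upstream LLVM; illustration only.
#include "llvm/Support/StreamingMemoryObject.h"
#include <cassert>
#include <cstdint>
#include <memory>

void readClampedBytes() {
  static const unsigned char Data[] = {1, 2, 3, 4};
  std::unique_ptr<llvm::MemoryObject> MO(
      llvm::getNonStreamedMemoryObject(Data, Data + sizeof(Data)));
  uint8_t Buf[8] = {};
  // Reads are clamped to the extent: asking for 8 bytes yields only 4.
  uint64_t N = MO->readBytes(Buf, sizeof(Buf), /*Address=*/0);
  assert(N == 4 && Buf[0] == 1);
  (void)N;
}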